In [1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
if ".txt" in filename:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
/kaggle/input/home-data/Houses Dataset/HousesInfo.txt
Import Statement¶
In [2]:
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from collections.abc import Sequence
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Conv2D,MaxPooling2D, Flatten
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
In [3]:
import seaborn as sns
import csv
import numpy as np
%matplotlib inline
import cv2
import glob
from IPython.display import Image
In [4]:
import tensorflow as tf
tf.test.gpu_device_name()
Out[4]:
'/device:GPU:0'
In [5]:
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name, x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
def to_xy(df, target):
result = []
for x in df.columns:
if x != target:
result.append(x)
# find out the type of the target column.
target_type = df[target].dtypes
target_type = target_type[0] if isinstance(target_type, Sequence) else target_type
# Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
if target_type in (np.int64, np.int32):
# Classification
dummies = pd.get_dummies(df[target])
return df[result].values.astype(np.float32), dummies.values.astype(np.float32)
else:
# Regression
return df[result].values.astype(np.float32), df[target].values.astype(np.float32)
def remove_outliers(df, name, sd):
drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
df.drop(drop_rows, axis=0, inplace=True)
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
data_low=None, data_high=None):
if data_low is None:
data_low = min(df[name])
data_high = max(df[name])
df[name] = ((df[name] - data_low) / (data_high - data_low)) \
* (normalized_high - normalized_low) + normalized_low
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
Data Preprocessing¶
Import Data¶
In [6]:
filename_read = '/kaggle/input/home-data/Houses Dataset/HousesInfo.txt'
# filename_read = './Houses Dataset/HousesInfo.txt'
In [7]:
cols=["Bedrooms","Bathrooms","area","zipcode","price"]
df = pd.read_csv(filename_read , sep=" ", header=None , names=cols)
In [8]:
df.head()
Out[8]:
| Bedrooms | Bathrooms | area | zipcode | price | |
|---|---|---|---|---|---|
| 0 | 4 | 4.0 | 4053 | 85255 | 869500 |
| 1 | 4 | 3.0 | 3343 | 36372 | 865200 |
| 2 | 3 | 4.0 | 3923 | 85266 | 889000 |
| 3 | 5 | 5.0 | 4022 | 85262 | 910000 |
| 4 | 3 | 4.0 | 4116 | 85266 | 971226 |
In [9]:
df.shape
Out[9]:
(535, 5)
In [10]:
# Keep the house prices between 300k and 900k (all others maybe considered outliers)
# Show distribution
Handle Images dataset¶
In [11]:
filepath = "/kaggle/input/home-data/Houses Dataset/"
# filepath = './Houses Dataset/'
In [12]:
## Bathroom.jpg
new_images=[]
for number in range(1, df.shape[0] + 1):
for path in glob.glob(filepath + str(number) + "_bathroom.jpg"):
if os.path.isfile(path):
new_images.append(path)
In [13]:
new_images[:5]
Out[13]:
['/kaggle/input/home-data/Houses Dataset/1_bathroom.jpg', '/kaggle/input/home-data/Houses Dataset/2_bathroom.jpg', '/kaggle/input/home-data/Houses Dataset/3_bathroom.jpg', '/kaggle/input/home-data/Houses Dataset/4_bathroom.jpg', '/kaggle/input/home-data/Houses Dataset/5_bathroom.jpg']
In [14]:
img= pd.DataFrame(new_images,columns = ['bathroom_img'])
In [15]:
## bedroom images
bedroom_images = []
for number in range(1, df.shape[0] + 1):
for path in glob.glob(filepath + str(number) + "_bedroom.jpg"):
if os.path.isfile(path):
bedroom_images.append(path)
In [16]:
img['bedroom_img']=bedroom_images
In [17]:
frontal_images = []
for number in range(1, df.shape[0] + 1):
for path in glob.glob(filepath + str(number) + "_frontal.jpg"):
if os.path.isfile(path):
frontal_images.append(path)
In [18]:
img['frontal_img']=frontal_images
In [19]:
kitchen_images = []
for number in range(1, df.shape[0] + 1):
for path in glob.glob(filepath + str(number) + "_kitchen.jpg"):
if os.path.isfile(path):
kitchen_images.append(path)
In [20]:
img['kitchen_img']=kitchen_images
In [21]:
img.head()
Out[21]:
| bathroom_img | bedroom_img | frontal_img | kitchen_img | |
|---|---|---|---|---|
| 0 | /kaggle/input/home-data/Houses Dataset/1_bathr... | /kaggle/input/home-data/Houses Dataset/1_bedro... | /kaggle/input/home-data/Houses Dataset/1_front... | /kaggle/input/home-data/Houses Dataset/1_kitch... |
| 1 | /kaggle/input/home-data/Houses Dataset/2_bathr... | /kaggle/input/home-data/Houses Dataset/2_bedro... | /kaggle/input/home-data/Houses Dataset/2_front... | /kaggle/input/home-data/Houses Dataset/2_kitch... |
| 2 | /kaggle/input/home-data/Houses Dataset/3_bathr... | /kaggle/input/home-data/Houses Dataset/3_bedro... | /kaggle/input/home-data/Houses Dataset/3_front... | /kaggle/input/home-data/Houses Dataset/3_kitch... |
| 3 | /kaggle/input/home-data/Houses Dataset/4_bathr... | /kaggle/input/home-data/Houses Dataset/4_bedro... | /kaggle/input/home-data/Houses Dataset/4_front... | /kaggle/input/home-data/Houses Dataset/4_kitch... |
| 4 | /kaggle/input/home-data/Houses Dataset/5_bathr... | /kaggle/input/home-data/Houses Dataset/5_bedro... | /kaggle/input/home-data/Houses Dataset/5_front... | /kaggle/input/home-data/Houses Dataset/5_kitch... |
Describe price distribution and handle outliers¶
In [22]:
price_summary = df['price'].describe()
price_summary
Out[22]:
count 5.350000e+02 mean 5.893628e+05 std 5.090261e+05 min 2.200000e+04 25% 2.492000e+05 50% 5.290000e+05 75% 7.285000e+05 max 5.858000e+06 Name: price, dtype: float64
In [23]:
# Print out the spread of prices using matplotlib
# Assuming df['prices'] contains your price data
plt.hist(df['price'], bins=30, edgecolor='black')
plt.title('Price Distribution')
plt.xlabel('Price')
plt.ylabel('Frequency')
plt.show()
In [24]:
# Remove values that are outliers
# df.iloc[row, col]
outliers = []
for i in range(0, len(df)):
if df.iloc[i, :].price < 300000 or df.iloc[i, :].price > 900000:
outliers.append(i)
# print("Outliers:", df.loc[outliers, 'price'])
# print(i, df.iloc[i, :].price)
print("Outliers", len(outliers))
print("Remaining", 535 - len(outliers))
print(outliers)
Outliers 227 Remaining 308 [3, 4, 5, 7, 8, 9, 11, 18, 19, 20, 21, 23, 24, 25, 26, 28, 29, 31, 33, 34, 35, 36, 38, 50, 53, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 74, 75, 76, 77, 78, 79, 86, 90, 92, 94, 96, 102, 109, 111, 112, 115, 125, 128, 130, 132, 133, 136, 143, 146, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 160, 161, 164, 168, 170, 172, 184, 185, 186, 190, 192, 202, 203, 207, 209, 214, 215, 216, 218, 219, 220, 223, 224, 226, 252, 255, 256, 257, 260, 261, 263, 266, 268, 270, 285, 286, 287, 288, 289, 290, 291, 292, 293, 318, 320, 321, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 425, 457, 463, 465, 471, 472, 476, 478, 483, 484, 485, 486, 487, 491, 499, 506, 514]
In [25]:
data = df[~df.index.isin(outliers)]
In [26]:
# Print out the spread of prices using matplotlib
# Assuming df['prices'] contains your price data
plt.hist(data['price'], bins=30, edgecolor='black')
plt.title('Price Distribution')
plt.xlabel('Price')
plt.ylabel('Frequency')
plt.show()
Merge bathroom, bedroom, frontal, and kitchen into one image¶
In [27]:
images_output=[]
img_df = img
for row_index,row in img.iterrows():
# Check if the row_index is in the outliers to do this do the outliers first
if row_index in outliers:
img_df = img_df[img_df.index != row_index] # Remove outlier
# print(img_df.shape)
continue
inputImages=[]
outputImage = np.zeros((128, 128, 3), dtype="uint8")
image_temp1 = cv2.imread(row.bathroom_img)
image1 = cv2.resize(image_temp1, (64 , 64))
image_temp2 = cv2.imread(row.bedroom_img)
image2 = cv2.resize(image_temp2, (64 , 64))
image_temp3 = cv2.imread(row.frontal_img)
image3 = cv2.resize(image_temp3, (64 , 64))
image_temp4 = cv2.imread(row.kitchen_img)
image4 = cv2.resize(image_temp4, (64 , 64))
inputImages.append(image1)
inputImages.append(image2)
inputImages.append(image3)
inputImages.append(image4)
outputImage[0:64, 0:64] = inputImages[0]
outputImage[0:64, 64:128] = inputImages[1]
outputImage[64:128, 64:128] = inputImages[2]
outputImage[64:128, 0:64] = inputImages[3]
images_output.append(outputImage)
In [28]:
plt.figure(figsize=(8,8))
plt.imshow(images_output[0], interpolation='nearest')
plt.show()
Check if same outliers are removed from both img and textual tables¶
In [29]:
# All the images match
value = data[img_df.index == data.index]
value.shape
Out[29]:
(308, 5)
Normalize/One Hot Encode Textual Data and Normalize image data¶
In [30]:
# One hot encode zip code & area
# encode_text_dummy(data, 'zipcode')
# encode_text_dummy(data, 'area')
data_formatted = pd.get_dummies(data, columns=['zipcode'])
In [31]:
data_formatted.head()
Out[31]:
| Bedrooms | Bathrooms | area | price | zipcode_36372 | zipcode_60002 | zipcode_62025 | zipcode_62249 | zipcode_81418 | zipcode_81524 | ... | zipcode_93314 | zipcode_93446 | zipcode_93510 | zipcode_94501 | zipcode_94531 | zipcode_94565 | zipcode_94568 | zipcode_95220 | zipcode_96019 | zipcode_98021 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 4 | 4.0 | 4053 | 869500 | False | False | False | False | False | False | ... | False | False | False | False | False | False | False | False | False | False |
| 1 | 4 | 3.0 | 3343 | 865200 | True | False | False | False | False | False | ... | False | False | False | False | False | False | False | False | False | False |
| 2 | 3 | 4.0 | 3923 | 889000 | False | False | False | False | False | False | ... | False | False | False | False | False | False | False | False | False | False |
| 6 | 3 | 4.0 | 2544 | 799000 | False | False | False | False | False | False | ... | False | False | False | False | False | False | False | False | False | False |
| 10 | 5 | 5.0 | 4829 | 519200 | False | False | False | False | False | False | ... | False | False | False | False | False | False | False | False | False | False |
5 rows × 36 columns
In [32]:
# Normalize bedrooms & bathrooms & area
encode_numeric_zscore(data_formatted, 'Bedrooms')
encode_numeric_zscore(data_formatted, 'Bathrooms')
encode_numeric_zscore(data_formatted, 'area')
In [33]:
print(data_formatted['price'].dtype)
data_formatted['price'] = data_formatted['price'].astype("float32")
print(data_formatted['price'].dtype)
int64 float32
In [34]:
# data_formatted['price'] = data_formatted['price']
In [35]:
# normalize image data
img_output_df = np.array(images_output).astype('float32')
img_output_df /= 255.0
to_xy and Test/Train Split¶
In [36]:
# use to_xy to split the tabular data into x and y
x, y = to_xy(data_formatted, 'price')
print(x.shape, y.shape)
(308, 35) (308,)
In [37]:
y[:5]
Out[37]:
array([869500., 865200., 889000., 799000., 519200.], dtype=float32)
In [38]:
x_train, x_test, y_train, y_test= train_test_split(x, y, test_size=.20, random_state = 42)
In [39]:
print("Train Sizes :", x_train.shape, y_train.shape)
print("Test Sizes :", x_test.shape, y_test.shape)
Train Sizes : (246, 35) (246,) Test Sizes : (62, 35) (62,)
In [40]:
x_train_img, x_test_img = train_test_split(img_output_df, test_size=.20,
random_state=42)
In [41]:
print("Train Sizes :", x_train_img.shape)
print("Test Sizes :", x_test_img.shape)
Train Sizes : (246, 128, 128, 3) Test Sizes : (62, 128, 128, 3)
Pre Model Training Check¶
In [42]:
print(f"x_train shape: ", x_train.shape)
print(f"x_train_img shape: ", x_train_img.shape)
print(f"x_test shape: ", x_test.shape)
print(f"x_test_img shape: ", x_test_img.shape)
print()
print(f"y_train shape: ", y_train.shape)
print(f"y_test shape: ",y_test.shape)
x_train shape: (246, 35) x_train_img shape: (246, 128, 128, 3) x_test shape: (62, 35) x_test_img shape: (62, 128, 128, 3) y_train shape: (246,) y_test shape: (62,)
In [43]:
from keras.utils import plot_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import concatenate
from keras.optimizers import Adam, SGD
Base Model : Only Textual Input¶
In [44]:
filepath = './dnn/best_weights-base.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.0075, beta_1=0.9, beta_2=0.999,epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.007, momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5,verbose=1, mode='auto')
model = Sequential()
model.add(Dense(4096, activation='relu'))
model.add(Dense(3000, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2048, activation='relu'))
model.add(Dense(1024, activation='relu'))
model.add(Dense(516, activation='relu'))
model.add(Dense(1))
# print(model.summary())
# plot_model(model, show_shapes=True)
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit(x_train, y_train,
validation_data=(x_test, y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=200)
print('Training model 1 finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred_base = model.predict(x_test)
score_base = np.sqrt(metrics.mean_squared_error(pred_base,y_test))
print("Score (RMSE): {}".format(score_base))
Epoch 1/200
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1729645961.509953 101 service.cc:145] XLA service 0x7acb28006fe0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices: I0000 00:00:1729645961.509998 101 service.cc:153] StreamExecutor device (0): Tesla T4, Compute Capability 7.5 I0000 00:00:1729645961.510003 101 service.cc:153] StreamExecutor device (1): Tesla T4, Compute Capability 7.5 I0000 00:00:1729645964.844040 101 device_compiler.h:188] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
8/8 - 16s - 2s/step - loss: 205308133376.0000 - val_loss: 71286177792.0000 Epoch 2/200 8/8 - 1s - 153ms/step - loss: 39483125760.0000 - val_loss: 42175311872.0000 Epoch 3/200 8/8 - 1s - 144ms/step - loss: 41181831168.0000 - val_loss: 32654471168.0000 Epoch 4/200 8/8 - 0s - 13ms/step - loss: 31822434304.0000 - val_loss: 44866752512.0000 Epoch 5/200 8/8 - 1s - 148ms/step - loss: 23962263552.0000 - val_loss: 22959218688.0000 Epoch 6/200 8/8 - 4s - 487ms/step - loss: 20260399104.0000 - val_loss: 19696099328.0000 Epoch 7/200 8/8 - 0s - 13ms/step - loss: 20689192960.0000 - val_loss: 31720919040.0000 Epoch 8/200 8/8 - 0s - 12ms/step - loss: 20090097664.0000 - val_loss: 22250913792.0000 Epoch 9/200 8/8 - 0s - 12ms/step - loss: 17019675648.0000 - val_loss: 27193542656.0000 Epoch 10/200 8/8 - 1s - 152ms/step - loss: 13604897792.0000 - val_loss: 19151042560.0000 Epoch 11/200 8/8 - 2s - 259ms/step - loss: 10825491456.0000 - val_loss: 18894815232.0000 Epoch 12/200 8/8 - 0s - 13ms/step - loss: 10533401600.0000 - val_loss: 19779686400.0000 Epoch 13/200 8/8 - 0s - 12ms/step - loss: 14451417088.0000 - val_loss: 20516184064.0000 Epoch 14/200 8/8 - 0s - 12ms/step - loss: 12054653952.0000 - val_loss: 21815433216.0000 Epoch 15/200 8/8 - 0s - 12ms/step - loss: 10899614720.0000 - val_loss: 23821811712.0000 Epoch 16/200 8/8 - 1s - 149ms/step - loss: 13756422144.0000 - val_loss: 17567539200.0000 Epoch 17/200 8/8 - 0s - 12ms/step - loss: 15954426880.0000 - val_loss: 27057399808.0000 Epoch 18/200 8/8 - 0s - 12ms/step - loss: 13367968768.0000 - val_loss: 24133998592.0000 Epoch 19/200 8/8 - 0s - 12ms/step - loss: 14764444672.0000 - val_loss: 19979614208.0000 Epoch 20/200 8/8 - 0s - 11ms/step - loss: 12718134272.0000 - val_loss: 20548304896.0000 Epoch 21/200 8/8 - 0s - 11ms/step - loss: 11548692480.0000 - val_loss: 22658754560.0000 Epoch 21: early stopping Training model 1 finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 187ms/step Score (RMSE): 132542.609375
In [45]:
chart_regression(pred_base.flatten(),y_test)
Both Input : Model #1¶
In [46]:
filepath = './dnn/best_weights1.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.005, momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,verbose=1, mode='auto')
# first image input model
visible1 = Input(shape=(128, 128, 3))
conv11 = Conv2D(32, kernel_size=4, activation='relu')(visible1)
pool11 = MaxPooling2D(pool_size=(2, 2))(conv11)
conv12 = Conv2D(64, kernel_size=4, activation='relu')(pool11)
pool12 = MaxPooling2D(pool_size=(2, 2))(conv12)
# conv13 = Conv2D(64, kernel_size=4, activation='relu')(pool12)
# pool13 = MaxPooling2D(pool_size=(2, 2))(conv13)
flat1 = Flatten()(pool12)
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(128, activation='relu')(visible2)
dense2 = Dense(64, activation='relu')(dense1)
dense3 = Dense(32, activation='relu')(dense2)
merge = concatenate([flat1, dense3])
# hidden1 = Dense(32, activation='relu')(merge)
# hidden2 = Dense(8, activation='relu')(hidden1)
# output = Dense(1)(hidden2)
output = Dense(1)(merge)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
# plot_model(model, show_shapes=True)
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=100)
print('Training model 1 finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred = model.predict([x_test_img, x_test])
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Score (RMSE): {}".format(score))
Epoch 1/100 8/8 - 8s - 1s/step - loss: 355371155456.0000 - val_loss: 359970963456.0000 Epoch 2/100 8/8 - 0s - 27ms/step - loss: 316526166016.0000 - val_loss: 258217508864.0000 Epoch 3/100 8/8 - 0s - 26ms/step - loss: 157490135040.0000 - val_loss: 40245022720.0000 Epoch 4/100 8/8 - 0s - 22ms/step - loss: 43660349440.0000 - val_loss: 59467612160.0000 Epoch 5/100 8/8 - 0s - 28ms/step - loss: 35723415552.0000 - val_loss: 35813642240.0000 Epoch 6/100 8/8 - 0s - 27ms/step - loss: 35536146432.0000 - val_loss: 32045844480.0000 Epoch 7/100 8/8 - 0s - 31ms/step - loss: 28900614144.0000 - val_loss: 32354478080.0000 Epoch 8/100 8/8 - 0s - 42ms/step - loss: 28354785280.0000 - val_loss: 29548832768.0000 Epoch 9/100 8/8 - 0s - 25ms/step - loss: 27668094976.0000 - val_loss: 29022355456.0000 Epoch 10/100 8/8 - 0s - 20ms/step - loss: 27189727232.0000 - val_loss: 29022423040.0000 Epoch 11/100 8/8 - 0s - 20ms/step - loss: 26892947456.0000 - val_loss: 29162377216.0000 Epoch 12/100 8/8 - 0s - 25ms/step - loss: 26572111872.0000 - val_loss: 28654704640.0000 Epoch 13/100 8/8 - 0s - 25ms/step - loss: 26343370752.0000 - val_loss: 28191791104.0000 Epoch 14/100 8/8 - 0s - 25ms/step - loss: 25341939712.0000 - val_loss: 27576506368.0000 Epoch 15/100 8/8 - 0s - 25ms/step - loss: 24539777024.0000 - val_loss: 26973394944.0000 Epoch 16/100 8/8 - 0s - 25ms/step - loss: 23434692608.0000 - val_loss: 26428973056.0000 Epoch 17/100 8/8 - 0s - 25ms/step - loss: 22721490944.0000 - val_loss: 25822529536.0000 Epoch 18/100 8/8 - 0s - 25ms/step - loss: 22040862720.0000 - val_loss: 25555359744.0000 Epoch 19/100 8/8 - 0s - 25ms/step - loss: 20982036480.0000 - val_loss: 24436015104.0000 Epoch 20/100 8/8 - 0s - 25ms/step - loss: 20656363520.0000 - val_loss: 23632744448.0000 Epoch 21/100 8/8 - 0s - 25ms/step - loss: 19649619968.0000 - val_loss: 23087493120.0000 Epoch 22/100 8/8 - 0s - 25ms/step - loss: 18815162368.0000 - val_loss: 22236049408.0000 Epoch 23/100 8/8 - 0s - 25ms/step - loss: 18231007232.0000 - val_loss: 21399592960.0000 Epoch 24/100 8/8 - 0s - 25ms/step - loss: 17510416384.0000 - val_loss: 20707239936.0000 Epoch 25/100 8/8 - 0s - 25ms/step - loss: 16709104640.0000 - val_loss: 20381603840.0000 Epoch 26/100 8/8 - 0s - 27ms/step - loss: 16269428736.0000 - val_loss: 19677763584.0000 Epoch 27/100 8/8 - 0s - 25ms/step - loss: 15813370880.0000 - val_loss: 19269513216.0000 Epoch 28/100 8/8 - 0s - 25ms/step - loss: 15320029184.0000 - val_loss: 18756499456.0000 Epoch 29/100 8/8 - 0s - 25ms/step - loss: 15028980736.0000 - val_loss: 18404753408.0000 Epoch 30/100 8/8 - 0s - 25ms/step - loss: 14922741760.0000 - val_loss: 18146000896.0000 Epoch 31/100 8/8 - 0s - 25ms/step - loss: 14087390208.0000 - val_loss: 17947486208.0000 Epoch 32/100 8/8 - 0s - 25ms/step - loss: 14019723264.0000 - val_loss: 17645209600.0000 Epoch 33/100 8/8 - 0s - 25ms/step - loss: 14538913792.0000 - val_loss: 17450928128.0000 Epoch 34/100 8/8 - 0s - 25ms/step - loss: 13422679040.0000 - val_loss: 17424601088.0000 Epoch 35/100 8/8 - 0s - 25ms/step - loss: 13276497920.0000 - val_loss: 17175976960.0000 Epoch 36/100 8/8 - 0s - 25ms/step - loss: 12968394752.0000 - val_loss: 17128804352.0000 Epoch 37/100 8/8 - 0s - 25ms/step - loss: 12886464512.0000 - val_loss: 16861050880.0000 Epoch 38/100 8/8 - 0s - 25ms/step - loss: 12606860288.0000 - val_loss: 16709944320.0000 Epoch 39/100 8/8 - 0s - 25ms/step - loss: 12504014848.0000 - val_loss: 16673518592.0000 Epoch 40/100 8/8 - 0s - 25ms/step - loss: 12283606016.0000 - val_loss: 16484448256.0000 Epoch 41/100 8/8 - 0s - 25ms/step - loss: 12126420992.0000 - val_loss: 16355249152.0000 Epoch 42/100 8/8 - 0s - 25ms/step - loss: 12039969792.0000 - val_loss: 16233763840.0000 Epoch 43/100 8/8 - 0s - 25ms/step - loss: 11898134528.0000 - val_loss: 16059382784.0000 Epoch 44/100 8/8 - 0s - 25ms/step - loss: 11793110016.0000 - val_loss: 15931476992.0000 Epoch 45/100 8/8 - 0s - 20ms/step - loss: 11659071488.0000 - val_loss: 15943547904.0000 Epoch 46/100 8/8 - 0s - 26ms/step - loss: 11519364096.0000 - val_loss: 15823844352.0000 Epoch 47/100 8/8 - 0s - 25ms/step - loss: 11355092992.0000 - val_loss: 15711588352.0000 Epoch 48/100 8/8 - 0s - 25ms/step - loss: 11425711104.0000 - val_loss: 15675561984.0000 Epoch 49/100 8/8 - 0s - 25ms/step - loss: 11325589504.0000 - val_loss: 15646523392.0000 Epoch 50/100 8/8 - 0s - 26ms/step - loss: 11186330624.0000 - val_loss: 15547138048.0000 Epoch 51/100 8/8 - 0s - 20ms/step - loss: 11051229184.0000 - val_loss: 15725873152.0000 Epoch 52/100 8/8 - 0s - 25ms/step - loss: 11124929536.0000 - val_loss: 15469293568.0000 Epoch 53/100 8/8 - 0s - 20ms/step - loss: 10650782720.0000 - val_loss: 15600510976.0000 Epoch 54/100 8/8 - 0s - 25ms/step - loss: 10923679744.0000 - val_loss: 15252531200.0000 Epoch 55/100 8/8 - 0s - 25ms/step - loss: 10619496448.0000 - val_loss: 14981457920.0000 Epoch 56/100 8/8 - 0s - 25ms/step - loss: 10490962944.0000 - val_loss: 14876012544.0000 Epoch 57/100 8/8 - 0s - 25ms/step - loss: 10423516160.0000 - val_loss: 14791948288.0000 Epoch 58/100 8/8 - 0s - 25ms/step - loss: 10307610624.0000 - val_loss: 14748005376.0000 Epoch 59/100 8/8 - 0s - 25ms/step - loss: 10472237056.0000 - val_loss: 14713533440.0000 Epoch 60/100 8/8 - 0s - 20ms/step - loss: 10775097344.0000 - val_loss: 14745447424.0000 Epoch 61/100 8/8 - 0s - 25ms/step - loss: 10119738368.0000 - val_loss: 14534434816.0000 Epoch 62/100 8/8 - 0s - 25ms/step - loss: 10100016128.0000 - val_loss: 14412533760.0000 Epoch 63/100 8/8 - 0s - 20ms/step - loss: 9931453440.0000 - val_loss: 14424089600.0000 Epoch 64/100 8/8 - 0s - 25ms/step - loss: 9831329792.0000 - val_loss: 14192845824.0000 Epoch 65/100 8/8 - 0s - 25ms/step - loss: 9772566528.0000 - val_loss: 14097193984.0000 Epoch 66/100 8/8 - 0s - 25ms/step - loss: 9726860288.0000 - val_loss: 13966750720.0000 Epoch 67/100 8/8 - 0s - 25ms/step - loss: 9681019904.0000 - val_loss: 13944439808.0000 Epoch 68/100 8/8 - 0s - 20ms/step - loss: 9644004352.0000 - val_loss: 13981966336.0000 Epoch 69/100 8/8 - 0s - 19ms/step - loss: 9518442496.0000 - val_loss: 14147939328.0000 Epoch 70/100 8/8 - 0s - 25ms/step - loss: 9457572864.0000 - val_loss: 13819728896.0000 Epoch 71/100 8/8 - 0s - 25ms/step - loss: 9426213888.0000 - val_loss: 13745347584.0000 Epoch 72/100 8/8 - 0s - 20ms/step - loss: 9497756672.0000 - val_loss: 13820730368.0000 Epoch 73/100 8/8 - 0s - 20ms/step - loss: 9376811008.0000 - val_loss: 13838429184.0000 Epoch 74/100 8/8 - 0s - 26ms/step - loss: 9283536896.0000 - val_loss: 13637082112.0000 Epoch 75/100 8/8 - 0s - 25ms/step - loss: 9224225792.0000 - val_loss: 13606193152.0000 Epoch 76/100 8/8 - 0s - 25ms/step - loss: 9190806528.0000 - val_loss: 13496722432.0000 Epoch 77/100 8/8 - 0s - 37ms/step - loss: 9388322816.0000 - val_loss: 13451107328.0000 Epoch 78/100 8/8 - 0s - 21ms/step - loss: 9206947840.0000 - val_loss: 13689434112.0000 Epoch 79/100 8/8 - 0s - 25ms/step - loss: 9123401728.0000 - val_loss: 13294406656.0000 Epoch 80/100 8/8 - 0s - 20ms/step - loss: 9035085824.0000 - val_loss: 13615443968.0000 Epoch 81/100 8/8 - 0s - 25ms/step - loss: 9191953408.0000 - val_loss: 13180433408.0000 Epoch 82/100 8/8 - 0s - 20ms/step - loss: 8914309120.0000 - val_loss: 13584086016.0000 Epoch 83/100 8/8 - 0s - 25ms/step - loss: 8807494656.0000 - val_loss: 13172307968.0000 Epoch 84/100 8/8 - 0s - 20ms/step - loss: 8837336064.0000 - val_loss: 13224507392.0000 Epoch 85/100 8/8 - 0s - 20ms/step - loss: 8716997632.0000 - val_loss: 13194428416.0000 Epoch 86/100 8/8 - 0s - 25ms/step - loss: 8721318912.0000 - val_loss: 13105902592.0000 Epoch 87/100 8/8 - 0s - 25ms/step - loss: 8813201408.0000 - val_loss: 13056398336.0000 Epoch 88/100 8/8 - 0s - 20ms/step - loss: 8745939968.0000 - val_loss: 13194889216.0000 Epoch 89/100 8/8 - 0s - 25ms/step - loss: 8778481664.0000 - val_loss: 12853784576.0000 Epoch 90/100 8/8 - 0s - 20ms/step - loss: 8677250048.0000 - val_loss: 13211665408.0000 Epoch 91/100 8/8 - 0s - 20ms/step - loss: 8737598464.0000 - val_loss: 12879473664.0000 Epoch 92/100 8/8 - 0s - 20ms/step - loss: 8437882368.0000 - val_loss: 13241866240.0000 Epoch 93/100 8/8 - 0s - 20ms/step - loss: 8589302784.0000 - val_loss: 12917735424.0000 Epoch 94/100 8/8 - 0s - 25ms/step - loss: 8457161728.0000 - val_loss: 12696206336.0000 Epoch 95/100 8/8 - 0s - 20ms/step - loss: 8408297472.0000 - val_loss: 12766589952.0000 Epoch 96/100 8/8 - 0s - 25ms/step - loss: 8442716672.0000 - val_loss: 12638266368.0000 Epoch 97/100 8/8 - 0s - 25ms/step - loss: 8370918400.0000 - val_loss: 12414097408.0000 Epoch 98/100 8/8 - 0s - 20ms/step - loss: 8351080960.0000 - val_loss: 12703996928.0000 Epoch 99/100 8/8 - 0s - 20ms/step - loss: 8647768064.0000 - val_loss: 12512877568.0000 Epoch 100/100 8/8 - 0s - 20ms/step - loss: 8431803904.0000 - val_loss: 12491300864.0000 Training model 1 finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 296ms/step Score (RMSE): 111418.5703125
In [47]:
chart_regression(pred.flatten(),y_test)
Both inputs : Model #2¶
In [48]:
filepath = './dnn/best_weights1.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.0075, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
# Maybe go between 1e-3 and 1e-2 = .5e-3
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,verbose=1, mode='auto')
# first image input model
visible1 = Input(shape=(128, 128, 3))
conv11 = Conv2D(32, kernel_size=4, activation='relu')(visible1)
pool11 = MaxPooling2D(pool_size=(2, 2))(conv11)
conv12 = Conv2D(64, kernel_size=4, activation='relu')(pool11)
pool12 = MaxPooling2D(pool_size=(2, 2))(conv12)
conv13 = Conv2D(128, kernel_size=4, activation='relu')(pool12)
pool13 = MaxPooling2D(pool_size=(2, 2))(conv13)
conv14 = Conv2D(256, kernel_size=4, activation='relu')(pool13)
pool14 = MaxPooling2D(pool_size=(2, 2))(conv14)
# pool14 = Dropout(0.5)(pool14)
flat1 = Flatten()(pool14)
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(2048, activation='relu')(visible2)
# dense1 = Dropout(0.3)(dense1)
dense2 = Dense(1024, activation='relu')(dense1)
dense3 = Dense(516, activation='relu')(dense2)
merge = concatenate([flat1, dense3])
hidden1 = Dense(1024, activation='relu')(merge)
hidden2 = Dense(512, activation='relu')(hidden1)
hidden2 = Dense(256, activation='relu')(hidden1)
output = Dense(1)(hidden2)
# output = Dense(1)(merge)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
# plot_model(model, show_shapes=True)
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=100)
print('Training model 1 finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred2 = model.predict([x_test_img, x_test])
score2 = np.sqrt(metrics.mean_squared_error(pred2,y_test))
print("Score (RMSE): {}".format(score2))
Epoch 1/100 8/8 - 15s - 2s/step - loss: 389828804608.0000 - val_loss: 370462785536.0000 Epoch 2/100 8/8 - 1s - 97ms/step - loss: 356003119104.0000 - val_loss: 361044738048.0000 Epoch 3/100 8/8 - 1s - 94ms/step - loss: 247356686336.0000 - val_loss: 74892238848.0000 Epoch 4/100 8/8 - 1s - 93ms/step - loss: 48796364800.0000 - val_loss: 43721646080.0000 Epoch 5/100 8/8 - 1s - 92ms/step - loss: 33067431936.0000 - val_loss: 28155848704.0000 Epoch 6/100 8/8 - 0s - 27ms/step - loss: 27799035904.0000 - val_loss: 32036517888.0000 Epoch 7/100 8/8 - 0s - 27ms/step - loss: 27639185408.0000 - val_loss: 29596291072.0000 Epoch 8/100 8/8 - 0s - 27ms/step - loss: 22858250240.0000 - val_loss: 28584753152.0000 Epoch 9/100 8/8 - 1s - 90ms/step - loss: 20207935488.0000 - val_loss: 18616401920.0000 Epoch 10/100 8/8 - 3s - 374ms/step - loss: 14237435904.0000 - val_loss: 16916883456.0000 Epoch 11/100 8/8 - 1s - 97ms/step - loss: 13455213568.0000 - val_loss: 16301957120.0000 Epoch 12/100 8/8 - 1s - 88ms/step - loss: 14092354560.0000 - val_loss: 11635962880.0000 Epoch 13/100 8/8 - 0s - 28ms/step - loss: 10395865088.0000 - val_loss: 12173178880.0000 Epoch 14/100 8/8 - 0s - 27ms/step - loss: 10911203328.0000 - val_loss: 12494735360.0000 Epoch 15/100 8/8 - 0s - 28ms/step - loss: 9924709376.0000 - val_loss: 12870940672.0000 Epoch 16/100 8/8 - 0s - 28ms/step - loss: 9601597440.0000 - val_loss: 13427461120.0000 Epoch 17/100 8/8 - 1s - 176ms/step - loss: 9325877248.0000 - val_loss: 11597479936.0000 Epoch 18/100 8/8 - 1s - 89ms/step - loss: 8464832000.0000 - val_loss: 11062200320.0000 Epoch 19/100 8/8 - 0s - 29ms/step - loss: 8442086400.0000 - val_loss: 11811104768.0000 Epoch 20/100 8/8 - 0s - 27ms/step - loss: 7618535424.0000 - val_loss: 12601833472.0000 Epoch 21/100 8/8 - 0s - 27ms/step - loss: 8098503168.0000 - val_loss: 11812276224.0000 Epoch 22/100 8/8 - 1s - 90ms/step - loss: 8024230912.0000 - val_loss: 10934122496.0000 Epoch 23/100 8/8 - 0s - 27ms/step - loss: 7319531520.0000 - val_loss: 10959563776.0000 Epoch 24/100 8/8 - 0s - 27ms/step - loss: 8123524096.0000 - val_loss: 12770299904.0000 Epoch 25/100 8/8 - 0s - 27ms/step - loss: 8078976512.0000 - val_loss: 21616267264.0000 Epoch 26/100 8/8 - 0s - 28ms/step - loss: 10584829952.0000 - val_loss: 22934452224.0000 Epoch 27/100 8/8 - 0s - 27ms/step - loss: 10590212096.0000 - val_loss: 14250762240.0000 Epoch 27: early stopping Training model 1 finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 418ms/step Score (RMSE): 104566.34375
In [49]:
chart_regression(pred2.flatten(),y_test)
Both Inputs : Model # 3¶
In [51]:
filepath = './dnn/best_weights3.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.0075, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
# Maybe go between 1e-3 and 1e-2 = .5e-3
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,verbose=1, mode='auto')
# first image input model
visible1 = Input(shape=(128, 128, 3))
conv11 = Conv2D(32, kernel_size=4, activation='relu')(visible1)
conv11 = Conv2D(32, kernel_size=4, activation='relu')(conv11)
pool11 = MaxPooling2D(pool_size=(2, 2))(conv11)
conv12 = Conv2D(64, kernel_size=4, activation='relu')(pool11)
conv12 = Conv2D(64, kernel_size=4, activation='relu')(conv12)
pool12 = MaxPooling2D(pool_size=(2, 2))(conv12)
conv13 = Conv2D(128, kernel_size=4, activation='relu')(pool12)
conv13 = Conv2D(256, kernel_size=4, activation='relu')(conv12)
pool13 = Dropout(0.5)(conv13) # Why does this work?
pool13 = MaxPooling2D(pool_size=(2, 2))(conv13)
conv14 = Conv2D(512, kernel_size=4, activation='relu')(pool13)
pool14 = MaxPooling2D(pool_size=(2, 2))(conv14)
# pool14 = Dropout(0.5)(pool14)
flat1 = Flatten()(pool14)
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(2048, activation='relu')(visible2)
dense1 = Dropout(0.3)(dense1)
dense2 = Dense(1024, activation='relu')(dense1)
dense2 = Dropout(0.5)(dense2)
dense3 = Dense(516, activation='relu')(dense2)
merge = concatenate([flat1, dense3])
hidden1 = Dense(1024, activation='relu')(merge)
hidden2 = Dense(512, activation='relu')(hidden1)
hidden3 = Dense(256, activation='relu')(hidden2)
output = Dense(1)(hidden3)
# output = Dense(1)(merge)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
# plot_model(model, show_shapes=True)
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=100)
print('Training model 1 finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred3 = model.predict([x_test_img, x_test])
score3 = np.sqrt(metrics.mean_squared_error(pred3,y_test))
print("Score (RMSE): {}".format(score3))
Epoch 1/100 8/8 - 37s - 5s/step - loss: 288773341184.0000 - val_loss: 3572049379328.0000 Epoch 2/100 8/8 - 4s - 561ms/step - loss: 717000146944.0000 - val_loss: 370902532096.0000 Epoch 3/100 8/8 - 7s - 854ms/step - loss: 356601331712.0000 - val_loss: 366871117824.0000 Epoch 4/100 8/8 - 5s - 682ms/step - loss: 250228490240.0000 - val_loss: 80654065664.0000 Epoch 5/100 8/8 - 7s - 861ms/step - loss: 47811641344.0000 - val_loss: 30857465856.0000 Epoch 6/100 8/8 - 5s - 682ms/step - loss: 29340442624.0000 - val_loss: 27620294656.0000 Epoch 7/100 8/8 - 7s - 851ms/step - loss: 21733648384.0000 - val_loss: 22747494400.0000 Epoch 8/100 8/8 - 1s - 116ms/step - loss: 21051240448.0000 - val_loss: 22858186752.0000 Epoch 9/100 8/8 - 4s - 548ms/step - loss: 18788083712.0000 - val_loss: 20866897920.0000 Epoch 10/100 8/8 - 1s - 112ms/step - loss: 16698616832.0000 - val_loss: 23065987072.0000 Epoch 11/100 8/8 - 1s - 105ms/step - loss: 16494222336.0000 - val_loss: 22608250880.0000 Epoch 12/100 8/8 - 4s - 555ms/step - loss: 17219028992.0000 - val_loss: 15397894144.0000 Epoch 13/100 8/8 - 5s - 641ms/step - loss: 12522994688.0000 - val_loss: 15370249216.0000 Epoch 14/100 8/8 - 1s - 112ms/step - loss: 11368299520.0000 - val_loss: 15682735104.0000 Epoch 15/100 8/8 - 7s - 840ms/step - loss: 11442593792.0000 - val_loss: 11902702592.0000 Epoch 16/100 8/8 - 1s - 111ms/step - loss: 11103521792.0000 - val_loss: 11929035776.0000 Epoch 17/100 8/8 - 1s - 104ms/step - loss: 10806233088.0000 - val_loss: 17628491776.0000 Epoch 18/100 8/8 - 1s - 104ms/step - loss: 11756621824.0000 - val_loss: 20629256192.0000 Epoch 19/100 8/8 - 1s - 104ms/step - loss: 12366111744.0000 - val_loss: 14723565568.0000 Epoch 20/100 8/8 - 1s - 106ms/step - loss: 9664669696.0000 - val_loss: 16066801664.0000 Epoch 20: early stopping Training model 1 finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 509ms/step Score (RMSE): 109099.5
In [54]:
chart_regression(pred3.flatten(),y_test)
Transfer Learning : VGG16 #1¶
In [77]:
# Transfer learning
from keras.applications.vgg16 import VGG16
filepath = './dnn/best_weights-vgg1.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.005, momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
# Transfer - Learning : VGG 16
vgg = VGG16(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# Freeze the weights
for layer in vgg.layers:
layer.trainable = False
conv_layer = Conv2D(128, kernel_size=3, activation='relu')(vgg.output)
max_pool = MaxPooling2D(pool_size=(2, 2))(conv_layer)
vgg_out = Flatten()(max_pool)
visible1 = vgg.input
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(2048, activation='relu')(visible2)
dense2 = Dense(1024, activation='relu')(dense1)
dense3 = Dense(512, activation='relu')(dense2)
merge = concatenate([vgg_out, dense3])
output = Dense(1)(merge)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=100)
print('Training finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred_vgg1 = model.predict([x_test_img, x_test])
score_vgg1 = np.sqrt(metrics.mean_squared_error(pred_vgg1,y_test))
print("Score (RMSE): {}".format(score_vgg1))
Epoch 1/100 8/8 - 6s - 764ms/step - loss: 357109497856.0000 - val_loss: 370915442688.0000 Epoch 2/100 8/8 - 1s - 113ms/step - loss: 356988944384.0000 - val_loss: 370658705408.0000 Epoch 3/100 8/8 - 1s - 105ms/step - loss: 356540809216.0000 - val_loss: 369840062464.0000 Epoch 4/100 8/8 - 1s - 105ms/step - loss: 355244867584.0000 - val_loss: 367667609600.0000 Epoch 5/100 8/8 - 1s - 109ms/step - loss: 352157007872.0000 - val_loss: 362729996288.0000 Epoch 6/100 8/8 - 1s - 106ms/step - loss: 345199706112.0000 - val_loss: 352804208640.0000 Epoch 7/100 8/8 - 2s - 263ms/step - loss: 331964973056.0000 - val_loss: 334684979200.0000 Epoch 8/100 8/8 - 1s - 105ms/step - loss: 309405581312.0000 - val_loss: 304690692096.0000 Epoch 9/100 8/8 - 1s - 106ms/step - loss: 272897736704.0000 - val_loss: 259861118976.0000 Epoch 10/100 8/8 - 1s - 105ms/step - loss: 221373038592.0000 - val_loss: 200332050432.0000 Epoch 11/100 8/8 - 2s - 299ms/step - loss: 155859075072.0000 - val_loss: 135237328896.0000 Epoch 12/100 8/8 - 1s - 115ms/step - loss: 92154290176.0000 - val_loss: 83900424192.0000 Epoch 13/100 8/8 - 1s - 104ms/step - loss: 51194753024.0000 - val_loss: 67028721664.0000 Epoch 14/100 8/8 - 0s - 55ms/step - loss: 42852528128.0000 - val_loss: 68638695424.0000 Epoch 15/100 8/8 - 1s - 107ms/step - loss: 41574993920.0000 - val_loss: 62080843776.0000 Epoch 16/100 8/8 - 2s - 197ms/step - loss: 36203331584.0000 - val_loss: 54432636928.0000 Epoch 17/100 8/8 - 1s - 106ms/step - loss: 33155434496.0000 - val_loss: 49799630848.0000 Epoch 18/100 8/8 - 1s - 104ms/step - loss: 30410362880.0000 - val_loss: 46167195648.0000 Epoch 19/100 8/8 - 1s - 107ms/step - loss: 28028039168.0000 - val_loss: 42810753024.0000 Epoch 20/100 8/8 - 1s - 106ms/step - loss: 25905510400.0000 - val_loss: 39954575360.0000 Epoch 21/100 8/8 - 2s - 291ms/step - loss: 24549427200.0000 - val_loss: 37577650176.0000 Epoch 22/100 8/8 - 1s - 113ms/step - loss: 22828931072.0000 - val_loss: 35228577792.0000 Epoch 23/100 8/8 - 1s - 108ms/step - loss: 21526882304.0000 - val_loss: 33358602240.0000 Epoch 24/100 8/8 - 1s - 104ms/step - loss: 20345645056.0000 - val_loss: 31762339840.0000 Epoch 25/100 8/8 - 2s - 248ms/step - loss: 19263127552.0000 - val_loss: 30256883712.0000 Epoch 26/100 8/8 - 1s - 106ms/step - loss: 18374680576.0000 - val_loss: 28959184896.0000 Epoch 27/100 8/8 - 1s - 105ms/step - loss: 17565769728.0000 - val_loss: 27787622400.0000 Epoch 28/100 8/8 - 1s - 107ms/step - loss: 16856208384.0000 - val_loss: 26816325632.0000 Epoch 29/100 8/8 - 1s - 105ms/step - loss: 16157313024.0000 - val_loss: 26037690368.0000 Epoch 30/100 8/8 - 2s - 255ms/step - loss: 15721502720.0000 - val_loss: 25220882432.0000 Epoch 31/100 8/8 - 1s - 106ms/step - loss: 15138616320.0000 - val_loss: 24380461056.0000 Epoch 32/100 8/8 - 1s - 109ms/step - loss: 14616924160.0000 - val_loss: 23868805120.0000 Epoch 33/100 8/8 - 1s - 105ms/step - loss: 14378519552.0000 - val_loss: 23376881664.0000 Epoch 34/100 8/8 - 1s - 106ms/step - loss: 13934904320.0000 - val_loss: 22926737408.0000 Epoch 35/100 8/8 - 2s - 198ms/step - loss: 13607005184.0000 - val_loss: 22553686016.0000 Epoch 36/100 8/8 - 1s - 106ms/step - loss: 13331350528.0000 - val_loss: 22229297152.0000 Epoch 37/100 8/8 - 1s - 104ms/step - loss: 13104901120.0000 - val_loss: 21864189952.0000 Epoch 38/100 8/8 - 1s - 103ms/step - loss: 12864657408.0000 - val_loss: 21456535552.0000 Epoch 39/100 8/8 - 1s - 103ms/step - loss: 12636702720.0000 - val_loss: 21239238656.0000 Epoch 40/100 8/8 - 1s - 104ms/step - loss: 12525586432.0000 - val_loss: 21172148224.0000 Epoch 41/100 8/8 - 2s - 205ms/step - loss: 12347839488.0000 - val_loss: 20952756224.0000 Epoch 42/100 8/8 - 1s - 109ms/step - loss: 12157690880.0000 - val_loss: 20701761536.0000 Epoch 43/100 8/8 - 1s - 105ms/step - loss: 12009214976.0000 - val_loss: 20459124736.0000 Epoch 44/100 8/8 - 1s - 105ms/step - loss: 11943318528.0000 - val_loss: 20259586048.0000 Epoch 45/100 8/8 - 2s - 229ms/step - loss: 11836160000.0000 - val_loss: 20171241472.0000 Epoch 46/100 8/8 - 0s - 55ms/step - loss: 11675684864.0000 - val_loss: 20179892224.0000 Epoch 47/100 8/8 - 1s - 106ms/step - loss: 11696166912.0000 - val_loss: 20047366144.0000 Epoch 48/100 8/8 - 1s - 107ms/step - loss: 11498198016.0000 - val_loss: 19999086592.0000 Epoch 49/100 8/8 - 1s - 107ms/step - loss: 11464457216.0000 - val_loss: 19722694656.0000 Epoch 50/100 8/8 - 1s - 105ms/step - loss: 11362777088.0000 - val_loss: 19586461696.0000 Epoch 51/100 8/8 - 2s - 257ms/step - loss: 11306577920.0000 - val_loss: 19510810624.0000 Epoch 52/100 8/8 - 1s - 107ms/step - loss: 11324113920.0000 - val_loss: 19367768064.0000 Epoch 53/100 8/8 - 1s - 105ms/step - loss: 11210382336.0000 - val_loss: 19319060480.0000 Epoch 54/100 8/8 - 1s - 104ms/step - loss: 11159977984.0000 - val_loss: 19204632576.0000 Epoch 55/100 8/8 - 1s - 106ms/step - loss: 11105344512.0000 - val_loss: 19092064256.0000 Epoch 56/100 8/8 - 2s - 256ms/step - loss: 11054464000.0000 - val_loss: 19090503680.0000 Epoch 57/100 8/8 - 0s - 55ms/step - loss: 10996736000.0000 - val_loss: 19174834176.0000 Epoch 58/100 8/8 - 1s - 106ms/step - loss: 10940626944.0000 - val_loss: 19090456576.0000 Epoch 59/100 8/8 - 1s - 104ms/step - loss: 10915001344.0000 - val_loss: 18939678720.0000 Epoch 60/100 8/8 - 1s - 108ms/step - loss: 10868730880.0000 - val_loss: 18882578432.0000 Epoch 61/100 8/8 - 0s - 57ms/step - loss: 10833651712.0000 - val_loss: 18958907392.0000 Epoch 62/100 8/8 - 0s - 54ms/step - loss: 10799295488.0000 - val_loss: 18885236736.0000 Epoch 63/100 8/8 - 1s - 177ms/step - loss: 10750878720.0000 - val_loss: 18799675392.0000 Epoch 64/100 8/8 - 1s - 106ms/step - loss: 10728214528.0000 - val_loss: 18739951616.0000 Epoch 65/100 8/8 - 1s - 104ms/step - loss: 10744617984.0000 - val_loss: 18691692544.0000 Epoch 66/100 8/8 - 0s - 55ms/step - loss: 10772488192.0000 - val_loss: 18951618560.0000 Epoch 67/100 8/8 - 1s - 106ms/step - loss: 10715024384.0000 - val_loss: 18685167616.0000 Epoch 68/100 8/8 - 0s - 55ms/step - loss: 10676811776.0000 - val_loss: 18928121856.0000 Epoch 69/100 8/8 - 0s - 55ms/step - loss: 10643721216.0000 - val_loss: 18756646912.0000 Epoch 70/100 8/8 - 0s - 55ms/step - loss: 10790673408.0000 - val_loss: 18825242624.0000 Epoch 71/100 8/8 - 2s - 205ms/step - loss: 10744395776.0000 - val_loss: 18521911296.0000 Epoch 72/100 8/8 - 0s - 54ms/step - loss: 10577290240.0000 - val_loss: 18604926976.0000 Epoch 73/100 8/8 - 0s - 55ms/step - loss: 10540840960.0000 - val_loss: 18851338240.0000 Epoch 74/100 8/8 - 0s - 55ms/step - loss: 10556408832.0000 - val_loss: 18657253376.0000 Epoch 75/100 8/8 - 0s - 55ms/step - loss: 10484264960.0000 - val_loss: 18559064064.0000 Epoch 76/100 8/8 - 1s - 106ms/step - loss: 10564759552.0000 - val_loss: 18404345856.0000 Epoch 77/100 8/8 - 1s - 111ms/step - loss: 10453718016.0000 - val_loss: 18335598592.0000 Epoch 78/100 8/8 - 2s - 193ms/step - loss: 10518384640.0000 - val_loss: 18327425024.0000 Epoch 79/100 8/8 - 1s - 106ms/step - loss: 10465306624.0000 - val_loss: 18318348288.0000 Epoch 80/100 8/8 - 1s - 106ms/step - loss: 10422926336.0000 - val_loss: 18144315392.0000 Epoch 81/100 8/8 - 0s - 54ms/step - loss: 10387391488.0000 - val_loss: 18175598592.0000 Epoch 82/100 8/8 - 0s - 55ms/step - loss: 10419236864.0000 - val_loss: 18348818432.0000 Epoch 83/100 8/8 - 0s - 55ms/step - loss: 10356454400.0000 - val_loss: 18190667776.0000 Epoch 84/100 8/8 - 0s - 55ms/step - loss: 10399023104.0000 - val_loss: 18207676416.0000 Epoch 85/100 8/8 - 0s - 55ms/step - loss: 10510418944.0000 - val_loss: 18445844480.0000 Epoch 85: early stopping Training finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 468ms/step Score (RMSE): 134700.84375
In [78]:
chart_regression(pred_vgg1.flatten(),y_test)
Transfer Learning : VGG16 #2 (trainable = true)¶
In [79]:
# Transfer learning
from keras.applications.vgg16 import VGG16
filepath = './dnn/best_weights-vgg2.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.005, momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=1, mode='auto')
# Transfer - Learning : VGG 16
vgg = VGG16(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# Freeze the weights
for layer in vgg.layers:
layer.trainable = True
conv_layer = Conv2D(128, kernel_size=3, activation='relu')(vgg.output)
max_pool = MaxPooling2D(pool_size=(2, 2))(conv_layer)
vgg_out = Flatten()(max_pool)
visible1 = vgg.input
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(2048, activation='relu')(visible2)
dense2 = Dense(1024, activation='relu')(dense1)
dense3 = Dense(512, activation='relu')(dense2)
merge = concatenate([vgg_out, dense3])
# hidden1 = Dense(512, activation='relu')(merge)
# hidden2 = Dense(256, activation='relu')(hidden1)
# output = Dense(1)(hidden2)
output = Dense(1)(merge)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=100)
print('Training finished...Loading the best model')
print()
# model.load_weights(filepath) # load weights from best model
pred_vgg2 = model.predict([x_test_img, x_test])
score_vgg2 = np.sqrt(metrics.mean_squared_error(pred_vgg2,y_test))
print("Score (RMSE): {}".format(score_vgg2))
Epoch 1/100 8/8 - 11s - 1s/step - loss: 98477956510253056.0000 - val_loss: 303063465984.0000 Epoch 2/100 8/8 - 1s - 139ms/step - loss: 2107533688832.0000 - val_loss: 369756176384.0000 Epoch 3/100 8/8 - 1s - 139ms/step - loss: 356539531264.0000 - val_loss: 367158263808.0000 Epoch 4/100 8/8 - 2s - 257ms/step - loss: 269744537600.0000 - val_loss: 240790159360.0000 Epoch 5/100 8/8 - 2s - 239ms/step - loss: 100941979648.0000 - val_loss: 24683929600.0000 Epoch 6/100 8/8 - 1s - 139ms/step - loss: 53171990528.0000 - val_loss: 41044328448.0000 Epoch 7/100 8/8 - 1s - 139ms/step - loss: 36210229248.0000 - val_loss: 41772793856.0000 Epoch 8/100 8/8 - 1s - 139ms/step - loss: 29340475392.0000 - val_loss: 27252961280.0000 Epoch 9/100 8/8 - 2s - 242ms/step - loss: 30167261184.0000 - val_loss: 24614602752.0000 Epoch 10/100 8/8 - 1s - 142ms/step - loss: 36199636992.0000 - val_loss: 26172733440.0000 Epoch 11/100 8/8 - 1s - 140ms/step - loss: 23779014656.0000 - val_loss: 27124082688.0000 Epoch 12/100 8/8 - 3s - 363ms/step - loss: 24989556736.0000 - val_loss: 24239321088.0000 Epoch 13/100 8/8 - 2s - 250ms/step - loss: 24240252928.0000 - val_loss: 24226115584.0000 Epoch 14/100 8/8 - 1s - 140ms/step - loss: 23725441024.0000 - val_loss: 28107677696.0000 Epoch 15/100 8/8 - 1s - 141ms/step - loss: 26673543168.0000 - val_loss: 28147427328.0000 Epoch 16/100 8/8 - 1s - 141ms/step - loss: 25089722368.0000 - val_loss: 28518916096.0000 Epoch 17/100 8/8 - 1s - 140ms/step - loss: 24446169088.0000 - val_loss: 26050238464.0000 Epoch 18/100 8/8 - 2s - 244ms/step - loss: 20350199808.0000 - val_loss: 20663142400.0000 Epoch 19/100 8/8 - 1s - 141ms/step - loss: 16470412288.0000 - val_loss: 22159106048.0000 Epoch 20/100 8/8 - 2s - 248ms/step - loss: 15757091840.0000 - val_loss: 19971659776.0000 Epoch 21/100 8/8 - 3s - 387ms/step - loss: 16336232448.0000 - val_loss: 17648381952.0000 Epoch 22/100 8/8 - 2s - 251ms/step - loss: 14437679104.0000 - val_loss: 16961068032.0000 Epoch 23/100 8/8 - 1s - 141ms/step - loss: 13898177536.0000 - val_loss: 21335939072.0000 Epoch 24/100 8/8 - 2s - 247ms/step - loss: 15474721792.0000 - val_loss: 14793943040.0000 Epoch 25/100 8/8 - 2s - 244ms/step - loss: 11502510080.0000 - val_loss: 14733190144.0000 Epoch 26/100 8/8 - 2s - 241ms/step - loss: 10533182464.0000 - val_loss: 14276973568.0000 Epoch 27/100 8/8 - 3s - 323ms/step - loss: 10446988288.0000 - val_loss: 13208377344.0000 Epoch 28/100 8/8 - 2s - 244ms/step - loss: 10061524992.0000 - val_loss: 12703187968.0000 Epoch 29/100 8/8 - 1s - 140ms/step - loss: 9188671488.0000 - val_loss: 13632504832.0000 Epoch 30/100 8/8 - 2s - 273ms/step - loss: 9257372672.0000 - val_loss: 12013086720.0000 Epoch 31/100 8/8 - 1s - 142ms/step - loss: 9282802688.0000 - val_loss: 12347857920.0000 Epoch 32/100 8/8 - 1s - 140ms/step - loss: 8902043648.0000 - val_loss: 12226070528.0000 Epoch 33/100 8/8 - 1s - 140ms/step - loss: 9030254592.0000 - val_loss: 13165268992.0000 Epoch 34/100 8/8 - 2s - 242ms/step - loss: 9161007104.0000 - val_loss: 11707984896.0000 Epoch 35/100 8/8 - 2s - 243ms/step - loss: 8301892096.0000 - val_loss: 11574075392.0000 Epoch 36/100 8/8 - 1s - 139ms/step - loss: 7976644096.0000 - val_loss: 12180003840.0000 Epoch 37/100 8/8 - 1s - 140ms/step - loss: 8459735552.0000 - val_loss: 13380633600.0000 Epoch 38/100 8/8 - 3s - 354ms/step - loss: 8173901824.0000 - val_loss: 11080372224.0000 Epoch 39/100 8/8 - 1s - 140ms/step - loss: 8150966272.0000 - val_loss: 11342439424.0000 Epoch 40/100 8/8 - 2s - 243ms/step - loss: 8596393984.0000 - val_loss: 10848686080.0000 Epoch 41/100 8/8 - 1s - 139ms/step - loss: 8398706688.0000 - val_loss: 11815157760.0000 Epoch 42/100 8/8 - 1s - 139ms/step - loss: 7826038272.0000 - val_loss: 11413584896.0000 Epoch 43/100 8/8 - 1s - 139ms/step - loss: 7517352448.0000 - val_loss: 12484359168.0000 Epoch 44/100 8/8 - 1s - 139ms/step - loss: 7593542656.0000 - val_loss: 11686118400.0000 Epoch 45/100 8/8 - 1s - 139ms/step - loss: 7215876608.0000 - val_loss: 12243982336.0000 Epoch 45: early stopping Training finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 467ms/step Score (RMSE): 110652.5234375
In [80]:
chart_regression(pred_vgg2.flatten(),y_test)
Transfer Learning : ResNet50¶
In [87]:
from keras.applications import ResNet50
from keras.layers import Input, Dense, Flatten, Conv2D, Dropout, concatenate, MaxPooling2D
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
from sklearn import metrics
filepath = './dnn/best_weights-resNet1.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
adam_optimizer = Adam(learning_rate=0.009, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=0.005, momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=25, verbose=1, mode='auto')
resNet = ResNet50(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
# Freeze the ResNet layers
for layer in resNet.layers:
layer.trainable = False
conv_layer = Conv2D(128, kernel_size=4, activation='relu')(resNet.output)
# max_pool = MaxPooling2D(pool_size=(2,2))(conv_layer)
# res_out = Flatten()(max_pool)
flatten_res = Flatten()(conv_layer)
# Tabular data input (dense layers)
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(512, activation='relu')(visible2)
dense2 = Dense(1024, activation='relu')(dense1)
# dense2 = Dropout(0.5)(dense2)
dense3 = Dense(1024, activation='relu')(dense2)
merge = concatenate([flatten_res, dense3])
hidden1 = Dense(2048, activation='relu')(merge)
# hidden1 = Dropout(0.5)(hidden1)
hidden2 = Dense(1024, activation='relu')(hidden1)
hidden2 = Dense(1024, activation='relu')(hidden2)
# hidden2 = Dropout(0.5)(hidden2)
hidden3 = Dense(512, activation='relu')(hidden2)
output = Dense(1)(hidden3)
# output = Dense(1)(merge)
model = Model(inputs=[resNet.input, visible2], outputs=output)
model.compile(optimizer=adam_optimizer, loss='mean_squared_error')
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer], verbose=2, epochs=100)
print('Training finished...Loading the best model')
model.load_weights(filepath)
pred_resNet = model.predict([x_test_img, x_test])
score_resNet = np.sqrt(metrics.mean_squared_error(pred_resNet, y_test))
print("Score (RMSE): {}".format(score_resNet))
Epoch 1/100 8/8 - 20s - 3s/step - loss: 343688642560.0000 - val_loss: 368869212160.0000 Epoch 2/100 8/8 - 2s - 208ms/step - loss: 355331997696.0000 - val_loss: 357225103360.0000 Epoch 3/100 8/8 - 2s - 196ms/step - loss: 217283854336.0000 - val_loss: 35435380736.0000 Epoch 4/100 8/8 - 4s - 461ms/step - loss: 81378050048.0000 - val_loss: 34845188096.0000 Epoch 5/100 8/8 - 0s - 60ms/step - loss: 39354314752.0000 - val_loss: 39310811136.0000 Epoch 6/100 8/8 - 2s - 216ms/step - loss: 24057284608.0000 - val_loss: 26119258112.0000 Epoch 7/100 8/8 - 0s - 43ms/step - loss: 18042099712.0000 - val_loss: 27164329984.0000 Epoch 8/100 8/8 - 2s - 242ms/step - loss: 16030982144.0000 - val_loss: 22946465792.0000 Epoch 9/100 8/8 - 2s - 194ms/step - loss: 14523408384.0000 - val_loss: 20112242688.0000 Epoch 10/100 8/8 - 2s - 197ms/step - loss: 11718484992.0000 - val_loss: 17581297664.0000 Epoch 11/100 8/8 - 3s - 369ms/step - loss: 11536602112.0000 - val_loss: 16853346304.0000 Epoch 12/100 8/8 - 2s - 205ms/step - loss: 11934524416.0000 - val_loss: 16775670784.0000 Epoch 13/100 8/8 - 3s - 351ms/step - loss: 10680504320.0000 - val_loss: 16169242624.0000 Epoch 14/100 8/8 - 0s - 57ms/step - loss: 11737072640.0000 - val_loss: 18993100800.0000 Epoch 15/100 8/8 - 0s - 43ms/step - loss: 12300834816.0000 - val_loss: 20013543424.0000 Epoch 16/100 8/8 - 0s - 43ms/step - loss: 12530514944.0000 - val_loss: 19980361728.0000 Epoch 17/100 8/8 - 0s - 42ms/step - loss: 10984398848.0000 - val_loss: 18687203328.0000 Epoch 18/100 8/8 - 0s - 42ms/step - loss: 10752083968.0000 - val_loss: 18892447744.0000 Epoch 19/100 8/8 - 2s - 199ms/step - loss: 9637939200.0000 - val_loss: 12993325056.0000 Epoch 20/100 8/8 - 0s - 43ms/step - loss: 8086447616.0000 - val_loss: 13412659200.0000 Epoch 21/100 8/8 - 2s - 198ms/step - loss: 8887006208.0000 - val_loss: 12339708928.0000 Epoch 22/100 8/8 - 2s - 192ms/step - loss: 7603577856.0000 - val_loss: 12130540544.0000 Epoch 23/100 8/8 - 0s - 43ms/step - loss: 7191845888.0000 - val_loss: 12283540480.0000 Epoch 24/100 8/8 - 2s - 197ms/step - loss: 7630202880.0000 - val_loss: 11082414080.0000 Epoch 25/100 8/8 - 0s - 43ms/step - loss: 7266596864.0000 - val_loss: 11426635776.0000 Epoch 26/100 8/8 - 0s - 44ms/step - loss: 6850041856.0000 - val_loss: 11277194240.0000 Epoch 27/100 8/8 - 2s - 226ms/step - loss: 7229795840.0000 - val_loss: 11029869568.0000 Epoch 28/100 8/8 - 2s - 199ms/step - loss: 6804054016.0000 - val_loss: 10288318464.0000 Epoch 29/100 8/8 - 0s - 43ms/step - loss: 6173859328.0000 - val_loss: 11615148032.0000 Epoch 30/100 8/8 - 0s - 43ms/step - loss: 6797023232.0000 - val_loss: 12913753088.0000 Epoch 31/100 8/8 - 0s - 43ms/step - loss: 7227819520.0000 - val_loss: 10830779392.0000 Epoch 32/100 8/8 - 0s - 43ms/step - loss: 8002421760.0000 - val_loss: 10657727488.0000 Epoch 33/100 8/8 - 0s - 43ms/step - loss: 6749570560.0000 - val_loss: 16542325760.0000 Epoch 34/100 8/8 - 0s - 43ms/step - loss: 9672331264.0000 - val_loss: 17578686464.0000 Epoch 35/100 8/8 - 2s - 279ms/step - loss: 7330137088.0000 - val_loss: 10097063936.0000 Epoch 36/100 8/8 - 2s - 208ms/step - loss: 6959677440.0000 - val_loss: 9678855168.0000 Epoch 37/100 8/8 - 0s - 44ms/step - loss: 7266845184.0000 - val_loss: 12130420736.0000 Epoch 38/100 8/8 - 3s - 350ms/step - loss: 5950133248.0000 - val_loss: 9031878656.0000 Epoch 39/100 8/8 - 0s - 56ms/step - loss: 4810904576.0000 - val_loss: 11541922816.0000 Epoch 40/100 8/8 - 0s - 45ms/step - loss: 5472159744.0000 - val_loss: 10176888832.0000 Epoch 41/100 8/8 - 0s - 43ms/step - loss: 5512726016.0000 - val_loss: 10960668672.0000 Epoch 42/100 8/8 - 0s - 43ms/step - loss: 6202236928.0000 - val_loss: 12790712320.0000 Epoch 43/100 8/8 - 0s - 43ms/step - loss: 6791001600.0000 - val_loss: 12081680384.0000 Epoch 44/100 8/8 - 0s - 43ms/step - loss: 4952603648.0000 - val_loss: 13286253568.0000 Epoch 45/100 8/8 - 2s - 233ms/step - loss: 5825606144.0000 - val_loss: 8345907200.0000 Epoch 46/100 8/8 - 0s - 44ms/step - loss: 4956800000.0000 - val_loss: 9456014336.0000 Epoch 47/100 8/8 - 0s - 43ms/step - loss: 3720694016.0000 - val_loss: 10123528192.0000 Epoch 48/100 8/8 - 0s - 43ms/step - loss: 3572302336.0000 - val_loss: 9395789824.0000 Epoch 49/100 8/8 - 2s - 200ms/step - loss: 5560272896.0000 - val_loss: 7923381760.0000 Epoch 50/100 8/8 - 0s - 43ms/step - loss: 4261129216.0000 - val_loss: 10889527296.0000 Epoch 51/100 8/8 - 0s - 43ms/step - loss: 5020419072.0000 - val_loss: 11168991232.0000 Epoch 52/100 8/8 - 0s - 43ms/step - loss: 7489695232.0000 - val_loss: 9531190272.0000 Epoch 53/100 8/8 - 0s - 43ms/step - loss: 4580328448.0000 - val_loss: 9275280384.0000 Epoch 54/100 8/8 - 0s - 43ms/step - loss: 3675055616.0000 - val_loss: 8823876608.0000 Epoch 55/100 8/8 - 2s - 222ms/step - loss: 3029416960.0000 - val_loss: 7804430848.0000 Epoch 56/100 8/8 - 0s - 44ms/step - loss: 3477314304.0000 - val_loss: 8397595136.0000 Epoch 57/100 8/8 - 0s - 43ms/step - loss: 3851264512.0000 - val_loss: 10806586368.0000 Epoch 58/100 8/8 - 0s - 43ms/step - loss: 4168550656.0000 - val_loss: 10045679616.0000 Epoch 59/100 8/8 - 0s - 43ms/step - loss: 3604981504.0000 - val_loss: 8787392512.0000 Epoch 60/100 8/8 - 0s - 43ms/step - loss: 3183405568.0000 - val_loss: 11372168192.0000 Epoch 61/100 8/8 - 0s - 43ms/step - loss: 4860128768.0000 - val_loss: 10329928704.0000 Epoch 62/100 8/8 - 0s - 43ms/step - loss: 3874722304.0000 - val_loss: 9601417216.0000 Epoch 63/100 8/8 - 0s - 43ms/step - loss: 4176076032.0000 - val_loss: 8450109440.0000 Epoch 64/100 8/8 - 0s - 43ms/step - loss: 3569046016.0000 - val_loss: 8636995584.0000 Epoch 65/100 8/8 - 0s - 43ms/step - loss: 2799125760.0000 - val_loss: 8914883584.0000 Epoch 66/100 8/8 - 0s - 43ms/step - loss: 2769716224.0000 - val_loss: 8153819136.0000 Epoch 67/100 8/8 - 0s - 43ms/step - loss: 2366477568.0000 - val_loss: 7894991360.0000 Epoch 68/100 8/8 - 0s - 43ms/step - loss: 1827070208.0000 - val_loss: 9157101568.0000 Epoch 69/100 8/8 - 0s - 44ms/step - loss: 2456001280.0000 - val_loss: 7882080768.0000 Epoch 70/100 8/8 - 0s - 43ms/step - loss: 1920451200.0000 - val_loss: 8750027776.0000 Epoch 71/100 8/8 - 0s - 43ms/step - loss: 2360403712.0000 - val_loss: 9532099584.0000 Epoch 72/100 8/8 - 0s - 43ms/step - loss: 1908828416.0000 - val_loss: 9047440384.0000 Epoch 73/100 8/8 - 0s - 44ms/step - loss: 1808188928.0000 - val_loss: 8158190592.0000 Epoch 74/100 8/8 - 0s - 44ms/step - loss: 1572947328.0000 - val_loss: 8420847104.0000 Epoch 75/100 8/8 - 0s - 43ms/step - loss: 1731808000.0000 - val_loss: 9543692288.0000 Epoch 76/100 8/8 - 0s - 43ms/step - loss: 2079366144.0000 - val_loss: 8558274560.0000 Epoch 77/100 8/8 - 0s - 43ms/step - loss: 1567895296.0000 - val_loss: 9269611520.0000 Epoch 78/100 8/8 - 0s - 43ms/step - loss: 1627222784.0000 - val_loss: 8911315968.0000 Epoch 79/100 8/8 - 0s - 43ms/step - loss: 1182259712.0000 - val_loss: 9657309184.0000 Epoch 80/100 8/8 - 0s - 43ms/step - loss: 1372559872.0000 - val_loss: 9690188800.0000 Epoch 80: early stopping Training finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 9s 5s/step Score (RMSE): 88342.6953125
In [64]:
chart_regression(pred_resNet.flatten(),y_test)
Transfer Learning : ResNet50 #2¶
In [83]:
# Transfer learning
from keras.applications import ResNet50
filepath = './dnn/best_weights-resNet2.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
for i in range(0,5):
print("======================================= " , i)
adam_optimizer = Adam(learning_rate=0.0075, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=1e-4 , momentum=0.9, nesterov=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=25, verbose=1, mode='auto')
resNet = ResNet50(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
for layer in resNet.layers:
layer.trainable = False
conv_layer = Conv2D(128, kernel_size=4, activation='relu')(resNet.output)
flatten_res = Flatten()(conv_layer)
visible1 = resNet.input
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(512, activation='relu')(visible2)
dense2 = Dense(1024, activation='relu')(dense1)
dense3 = Dense(2048, activation='relu')(dense2)
dense4 = Dense(512, activation='relu')(dense3)
merge = concatenate([flatten_res, dense4])
hidden1 = Dense(512, activation='relu')(merge)
hidden2 = Dense(1024, activation='relu')(hidden1)
hidden3 = Dense(2048, activation='relu')(hidden2)
hidden4 = Dense(2048, activation='relu')(hidden3)
hidden5 = Dense(512, activation='relu')(hidden4)
output = Dense(1)(hidden5)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=150)
print('Training finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred_resNet2 = model.predict([x_test_img, x_test])
score_resNet2 = np.sqrt(metrics.mean_squared_error(pred_resNet2,y_test))
print("Score (RMSE): {}".format(score_resNet2))
======================================= 0 Epoch 1/150 8/8 - 23s - 3s/step - loss: 319721209856.0000 - val_loss: 370657591296.0000 Epoch 2/150 8/8 - 2s - 238ms/step - loss: 290786541568.0000 - val_loss: 227072081920.0000 Epoch 3/150 8/8 - 4s - 552ms/step - loss: 118001278976.0000 - val_loss: 29021890560.0000 Epoch 4/150 8/8 - 0s - 55ms/step - loss: 61190516736.0000 - val_loss: 52206501888.0000 Epoch 5/150 8/8 - 0s - 45ms/step - loss: 35417866240.0000 - val_loss: 55066161152.0000 Epoch 6/150 8/8 - 2s - 216ms/step - loss: 32365518848.0000 - val_loss: 27514355712.0000 Epoch 7/150 8/8 - 2s - 215ms/step - loss: 21655685120.0000 - val_loss: 22070456320.0000 Epoch 8/150 8/8 - 2s - 218ms/step - loss: 17482160128.0000 - val_loss: 22067984384.0000 Epoch 9/150 8/8 - 5s - 606ms/step - loss: 15329366016.0000 - val_loss: 20180443136.0000 Epoch 10/150 8/8 - 2s - 282ms/step - loss: 13808812032.0000 - val_loss: 17435619328.0000 Epoch 11/150 8/8 - 0s - 57ms/step - loss: 15548213248.0000 - val_loss: 17636808704.0000 Epoch 12/150 8/8 - 2s - 219ms/step - loss: 10538602496.0000 - val_loss: 13450860544.0000 Epoch 13/150 8/8 - 0s - 43ms/step - loss: 11992697856.0000 - val_loss: 17736079360.0000 Epoch 14/150 8/8 - 0s - 43ms/step - loss: 10125106176.0000 - val_loss: 17297338368.0000 Epoch 15/150 8/8 - 0s - 44ms/step - loss: 9144175616.0000 - val_loss: 17652037632.0000 Epoch 16/150 8/8 - 3s - 397ms/step - loss: 8417947648.0000 - val_loss: 10573316096.0000 Epoch 17/150 8/8 - 0s - 58ms/step - loss: 7398643200.0000 - val_loss: 10875116544.0000 Epoch 18/150 8/8 - 0s - 42ms/step - loss: 6723719168.0000 - val_loss: 11920780288.0000 Epoch 19/150 8/8 - 2s - 218ms/step - loss: 6539856384.0000 - val_loss: 8697222144.0000 Epoch 20/150 8/8 - 0s - 44ms/step - loss: 7597569536.0000 - val_loss: 12927842304.0000 Epoch 21/150 8/8 - 0s - 44ms/step - loss: 8304240640.0000 - val_loss: 12695304192.0000 Epoch 22/150 8/8 - 2s - 272ms/step - loss: 6493758464.0000 - val_loss: 8583385088.0000 Epoch 23/150 8/8 - 0s - 57ms/step - loss: 7047372800.0000 - val_loss: 8963922944.0000 Epoch 24/150 8/8 - 2s - 215ms/step - loss: 5992011776.0000 - val_loss: 8486186496.0000 Epoch 25/150 8/8 - 0s - 44ms/step - loss: 5508170240.0000 - val_loss: 10827868160.0000 Epoch 26/150 8/8 - 0s - 43ms/step - loss: 7335631872.0000 - val_loss: 10224600064.0000 Epoch 27/150 8/8 - 0s - 44ms/step - loss: 7970451968.0000 - val_loss: 9805443072.0000 Epoch 28/150 8/8 - 0s - 45ms/step - loss: 7085118464.0000 - val_loss: 12077138944.0000 Epoch 29/150 8/8 - 0s - 44ms/step - loss: 6370752512.0000 - val_loss: 12534009856.0000 Epoch 30/150 8/8 - 2s - 273ms/step - loss: 6375626240.0000 - val_loss: 7832850944.0000 Epoch 31/150 8/8 - 0s - 54ms/step - loss: 6371791872.0000 - val_loss: 14033116160.0000 Epoch 32/150 8/8 - 0s - 44ms/step - loss: 6926347264.0000 - val_loss: 10506520576.0000 Epoch 33/150 8/8 - 2s - 214ms/step - loss: 4960961536.0000 - val_loss: 7310561792.0000 Epoch 34/150 8/8 - 0s - 44ms/step - loss: 5140932096.0000 - val_loss: 9926811648.0000 Epoch 35/150 8/8 - 0s - 44ms/step - loss: 4800078848.0000 - val_loss: 8254832128.0000 Epoch 36/150 8/8 - 0s - 44ms/step - loss: 4039965952.0000 - val_loss: 7583156736.0000 Epoch 37/150 8/8 - 0s - 44ms/step - loss: 3736108288.0000 - val_loss: 12681845760.0000 Epoch 38/150 8/8 - 0s - 44ms/step - loss: 7528453632.0000 - val_loss: 10006091776.0000 Epoch 39/150 8/8 - 0s - 44ms/step - loss: 5030610432.0000 - val_loss: 15609115648.0000 Epoch 40/150 8/8 - 0s - 44ms/step - loss: 7149642240.0000 - val_loss: 7564073984.0000 Epoch 41/150 8/8 - 0s - 44ms/step - loss: 6850776576.0000 - val_loss: 9907296256.0000 Epoch 42/150 8/8 - 0s - 44ms/step - loss: 7753598464.0000 - val_loss: 10597488640.0000 Epoch 43/150 8/8 - 2s - 226ms/step - loss: 4941890560.0000 - val_loss: 6841288192.0000 Epoch 44/150 8/8 - 0s - 45ms/step - loss: 4671653888.0000 - val_loss: 11344503808.0000 Epoch 45/150 8/8 - 0s - 44ms/step - loss: 4224482304.0000 - val_loss: 7633529856.0000 Epoch 46/150 8/8 - 0s - 44ms/step - loss: 5938935296.0000 - val_loss: 12315461632.0000 Epoch 47/150 8/8 - 0s - 44ms/step - loss: 6503223296.0000 - val_loss: 7215206400.0000 Epoch 48/150 8/8 - 0s - 44ms/step - loss: 8211675648.0000 - val_loss: 6928662528.0000 Epoch 49/150 8/8 - 0s - 44ms/step - loss: 6465079296.0000 - val_loss: 19284856832.0000 Epoch 50/150 8/8 - 0s - 43ms/step - loss: 6603610112.0000 - val_loss: 8105010688.0000 Epoch 51/150 8/8 - 0s - 44ms/step - loss: 4362271232.0000 - val_loss: 8284277248.0000 Epoch 52/150 8/8 - 0s - 44ms/step - loss: 3973266176.0000 - val_loss: 6941509632.0000 Epoch 53/150 8/8 - 0s - 44ms/step - loss: 3667523840.0000 - val_loss: 7579753984.0000 Epoch 54/150 8/8 - 0s - 45ms/step - loss: 2748536576.0000 - val_loss: 7724619776.0000 Epoch 55/150 8/8 - 0s - 44ms/step - loss: 2554678784.0000 - val_loss: 6882731520.0000 Epoch 56/150 8/8 - 0s - 44ms/step - loss: 2334604544.0000 - val_loss: 8000998400.0000 Epoch 57/150 8/8 - 0s - 44ms/step - loss: 3215058688.0000 - val_loss: 9302465536.0000 Epoch 58/150 8/8 - 0s - 46ms/step - loss: 2165706752.0000 - val_loss: 8269378048.0000 Epoch 59/150 8/8 - 2s - 222ms/step - loss: 2756900096.0000 - val_loss: 6685319680.0000 Epoch 60/150 8/8 - 0s - 44ms/step - loss: 4076736768.0000 - val_loss: 7499158528.0000 Epoch 61/150 8/8 - 0s - 44ms/step - loss: 2776761344.0000 - val_loss: 8415473152.0000 Epoch 62/150 8/8 - 0s - 44ms/step - loss: 2841111552.0000 - val_loss: 7466414592.0000 Epoch 63/150 8/8 - 0s - 44ms/step - loss: 2600123136.0000 - val_loss: 8024599040.0000 Epoch 64/150 8/8 - 0s - 44ms/step - loss: 2333960960.0000 - val_loss: 10717296640.0000 Epoch 65/150 8/8 - 0s - 44ms/step - loss: 2490125312.0000 - val_loss: 9897322496.0000 Epoch 66/150 8/8 - 0s - 44ms/step - loss: 2540238080.0000 - val_loss: 7674310656.0000 Epoch 67/150 8/8 - 0s - 45ms/step - loss: 1692400512.0000 - val_loss: 8339127296.0000 Epoch 68/150 8/8 - 0s - 44ms/step - loss: 2159478016.0000 - val_loss: 11031647232.0000 Epoch 69/150 8/8 - 0s - 44ms/step - loss: 2848952064.0000 - val_loss: 14270696448.0000 Epoch 70/150 8/8 - 0s - 44ms/step - loss: 3938407424.0000 - val_loss: 9517571072.0000 Epoch 71/150 8/8 - 0s - 45ms/step - loss: 2908075520.0000 - val_loss: 13247438848.0000 Epoch 72/150 8/8 - 0s - 45ms/step - loss: 3820322816.0000 - val_loss: 10242641920.0000 Epoch 73/150 8/8 - 0s - 45ms/step - loss: 2515527680.0000 - val_loss: 7510495744.0000 Epoch 74/150 8/8 - 0s - 44ms/step - loss: 1700069632.0000 - val_loss: 7690868224.0000 Epoch 75/150 8/8 - 0s - 44ms/step - loss: 2163100672.0000 - val_loss: 8824305664.0000 Epoch 76/150 8/8 - 0s - 44ms/step - loss: 2006841856.0000 - val_loss: 8428301824.0000 Epoch 77/150 8/8 - 0s - 44ms/step - loss: 2896235008.0000 - val_loss: 9930632192.0000 Epoch 78/150 8/8 - 0s - 44ms/step - loss: 2806994944.0000 - val_loss: 13971971072.0000 Epoch 79/150 8/8 - 0s - 44ms/step - loss: 2777547776.0000 - val_loss: 8751472640.0000 Epoch 80/150 8/8 - 0s - 44ms/step - loss: 1549833472.0000 - val_loss: 8155105280.0000 Epoch 81/150 8/8 - 0s - 44ms/step - loss: 1919553280.0000 - val_loss: 8938406912.0000 Epoch 82/150 8/8 - 0s - 44ms/step - loss: 1825622400.0000 - val_loss: 10938185728.0000 Epoch 83/150 8/8 - 0s - 44ms/step - loss: 1716802432.0000 - val_loss: 9716320256.0000 Epoch 84/150 8/8 - 0s - 45ms/step - loss: 2283477504.0000 - val_loss: 10222003200.0000 Epoch 84: early stopping ======================================= 1 Epoch 1/150 8/8 - 23s - 3s/step - loss: 323124264960.0000 - val_loss: 370221547520.0000 Epoch 2/150 8/8 - 0s - 53ms/step - loss: 280841584640.0000 - val_loss: 42515427328.0000 Epoch 3/150 8/8 - 0s - 44ms/step - loss: 48617910272.0000 - val_loss: 55012298752.0000 Epoch 4/150 8/8 - 0s - 44ms/step - loss: 33515941888.0000 - val_loss: 34180034560.0000 Epoch 5/150 8/8 - 0s - 44ms/step - loss: 24238788608.0000 - val_loss: 21874577408.0000 Epoch 6/150 8/8 - 0s - 44ms/step - loss: 19186792448.0000 - val_loss: 22241099776.0000 Epoch 7/150 8/8 - 0s - 44ms/step - loss: 16471012352.0000 - val_loss: 18573068288.0000 Epoch 8/150 8/8 - 0s - 44ms/step - loss: 13675579392.0000 - val_loss: 20579489792.0000 Epoch 9/150 8/8 - 0s - 44ms/step - loss: 13156483072.0000 - val_loss: 23870595072.0000 Epoch 10/150 8/8 - 0s - 44ms/step - loss: 15271145472.0000 - val_loss: 20938483712.0000 Epoch 11/150 8/8 - 0s - 44ms/step - loss: 15053608960.0000 - val_loss: 16290620416.0000 Epoch 12/150 8/8 - 0s - 45ms/step - loss: 12879557632.0000 - val_loss: 23330811904.0000 Epoch 13/150 8/8 - 0s - 44ms/step - loss: 13119328256.0000 - val_loss: 13680312320.0000 Epoch 14/150 8/8 - 0s - 44ms/step - loss: 13158654976.0000 - val_loss: 32319952896.0000 Epoch 15/150 8/8 - 0s - 43ms/step - loss: 19809118208.0000 - val_loss: 22007406592.0000 Epoch 16/150 8/8 - 0s - 46ms/step - loss: 16928901120.0000 - val_loss: 10907711488.0000 Epoch 17/150 8/8 - 0s - 44ms/step - loss: 10646664192.0000 - val_loss: 18891606016.0000 Epoch 18/150 8/8 - 0s - 43ms/step - loss: 9932808192.0000 - val_loss: 10771725312.0000 Epoch 19/150 8/8 - 0s - 44ms/step - loss: 7640130048.0000 - val_loss: 9787567104.0000 Epoch 20/150 8/8 - 0s - 44ms/step - loss: 6777099264.0000 - val_loss: 14609636352.0000 Epoch 21/150 8/8 - 0s - 44ms/step - loss: 7358688768.0000 - val_loss: 9857030144.0000 Epoch 22/150 8/8 - 0s - 43ms/step - loss: 7909317632.0000 - val_loss: 13521220608.0000 Epoch 23/150 8/8 - 0s - 44ms/step - loss: 6962408960.0000 - val_loss: 9131530240.0000 Epoch 24/150 8/8 - 0s - 44ms/step - loss: 6194423296.0000 - val_loss: 10061654016.0000 Epoch 25/150 8/8 - 0s - 44ms/step - loss: 5581868544.0000 - val_loss: 10461538304.0000 Epoch 26/150 8/8 - 0s - 44ms/step - loss: 6399563776.0000 - val_loss: 10648406016.0000 Epoch 27/150 8/8 - 0s - 44ms/step - loss: 5708452864.0000 - val_loss: 11247179776.0000 Epoch 28/150 8/8 - 0s - 44ms/step - loss: 8574018048.0000 - val_loss: 9227262976.0000 Epoch 29/150 8/8 - 0s - 44ms/step - loss: 6712791552.0000 - val_loss: 8725606400.0000 Epoch 30/150 8/8 - 0s - 44ms/step - loss: 4667999232.0000 - val_loss: 11859616768.0000 Epoch 31/150 8/8 - 0s - 44ms/step - loss: 5393193472.0000 - val_loss: 9648734208.0000 Epoch 32/150 8/8 - 0s - 44ms/step - loss: 6554749952.0000 - val_loss: 12750115840.0000 Epoch 33/150 8/8 - 0s - 44ms/step - loss: 6426609152.0000 - val_loss: 13898280960.0000 Epoch 34/150 8/8 - 0s - 44ms/step - loss: 5829546496.0000 - val_loss: 10843250688.0000 Epoch 35/150 8/8 - 0s - 44ms/step - loss: 5386816000.0000 - val_loss: 14843464704.0000 Epoch 36/150 8/8 - 0s - 44ms/step - loss: 6304473600.0000 - val_loss: 9510558720.0000 Epoch 37/150 8/8 - 0s - 45ms/step - loss: 6006458368.0000 - val_loss: 15671720960.0000 Epoch 38/150 8/8 - 0s - 44ms/step - loss: 7293254656.0000 - val_loss: 9463809024.0000 Epoch 39/150 8/8 - 0s - 44ms/step - loss: 4516019712.0000 - val_loss: 9224008704.0000 Epoch 40/150 8/8 - 0s - 44ms/step - loss: 4288998656.0000 - val_loss: 9594572800.0000 Epoch 41/150 8/8 - 0s - 44ms/step - loss: 4333754368.0000 - val_loss: 10188886016.0000 Epoch 42/150 8/8 - 0s - 44ms/step - loss: 4284501248.0000 - val_loss: 9757449216.0000 Epoch 43/150 8/8 - 0s - 45ms/step - loss: 4135467776.0000 - val_loss: 14799822848.0000 Epoch 44/150 8/8 - 0s - 46ms/step - loss: 5738757120.0000 - val_loss: 17976887296.0000 Epoch 45/150 8/8 - 0s - 44ms/step - loss: 8246518784.0000 - val_loss: 19484778496.0000 Epoch 46/150 8/8 - 0s - 44ms/step - loss: 7760026112.0000 - val_loss: 10313184256.0000 Epoch 47/150 8/8 - 0s - 44ms/step - loss: 5194813952.0000 - val_loss: 11470895104.0000 Epoch 48/150 8/8 - 0s - 44ms/step - loss: 5281802752.0000 - val_loss: 9331975168.0000 Epoch 49/150 8/8 - 0s - 45ms/step - loss: 4152193536.0000 - val_loss: 9266804736.0000 Epoch 50/150 8/8 - 0s - 44ms/step - loss: 5914046976.0000 - val_loss: 13175555072.0000 Epoch 51/150 8/8 - 0s - 44ms/step - loss: 5872590336.0000 - val_loss: 10324704256.0000 Epoch 52/150 8/8 - 0s - 44ms/step - loss: 5051201024.0000 - val_loss: 13209012224.0000 Epoch 53/150 8/8 - 0s - 45ms/step - loss: 4757768192.0000 - val_loss: 12152287232.0000 Epoch 54/150 8/8 - 0s - 45ms/step - loss: 4264729856.0000 - val_loss: 11725083648.0000 Epoch 54: early stopping ======================================= 2 Epoch 1/150 8/8 - 21s - 3s/step - loss: 344623808512.0000 - val_loss: 370955124736.0000 Epoch 2/150 8/8 - 0s - 54ms/step - loss: 546286370816.0000 - val_loss: 224578224128.0000 Epoch 3/150 8/8 - 0s - 43ms/step - loss: 263560888320.0000 - val_loss: 365780303872.0000 Epoch 4/150 8/8 - 0s - 44ms/step - loss: 354419146752.0000 - val_loss: 366570995712.0000 Epoch 5/150 8/8 - 0s - 43ms/step - loss: 311703764992.0000 - val_loss: 328785526784.0000 Epoch 6/150 8/8 - 0s - 43ms/step - loss: 153192284160.0000 - val_loss: 191294144512.0000 Epoch 7/150 8/8 - 0s - 44ms/step - loss: 66382794752.0000 - val_loss: 71911161856.0000 Epoch 8/150 8/8 - 0s - 44ms/step - loss: 43769565184.0000 - val_loss: 73560678400.0000 Epoch 9/150 8/8 - 0s - 44ms/step - loss: 32644128768.0000 - val_loss: 35337408512.0000 Epoch 10/150 8/8 - 0s - 44ms/step - loss: 19179958272.0000 - val_loss: 28929382400.0000 Epoch 11/150 8/8 - 0s - 44ms/step - loss: 13173669888.0000 - val_loss: 28569276416.0000 Epoch 12/150 8/8 - 0s - 46ms/step - loss: 12533525504.0000 - val_loss: 24265093120.0000 Epoch 13/150 8/8 - 0s - 44ms/step - loss: 11787664384.0000 - val_loss: 23159500800.0000 Epoch 14/150 8/8 - 0s - 43ms/step - loss: 10243992576.0000 - val_loss: 21414912000.0000 Epoch 15/150 8/8 - 0s - 43ms/step - loss: 10104990720.0000 - val_loss: 22558169088.0000 Epoch 16/150 8/8 - 0s - 43ms/step - loss: 10711566336.0000 - val_loss: 25117384704.0000 Epoch 17/150 8/8 - 0s - 44ms/step - loss: 10217789440.0000 - val_loss: 20548102144.0000 Epoch 18/150 8/8 - 0s - 43ms/step - loss: 8842452992.0000 - val_loss: 23685742592.0000 Epoch 19/150 8/8 - 0s - 44ms/step - loss: 8758758400.0000 - val_loss: 18865295360.0000 Epoch 20/150 8/8 - 0s - 44ms/step - loss: 7914476544.0000 - val_loss: 18802552832.0000 Epoch 21/150 8/8 - 0s - 44ms/step - loss: 9545149440.0000 - val_loss: 19648262144.0000 Epoch 22/150 8/8 - 0s - 43ms/step - loss: 7907062272.0000 - val_loss: 17209364480.0000 Epoch 23/150 8/8 - 0s - 44ms/step - loss: 7948295680.0000 - val_loss: 19794915328.0000 Epoch 24/150 8/8 - 0s - 44ms/step - loss: 7782191104.0000 - val_loss: 18811815936.0000 Epoch 25/150 8/8 - 0s - 44ms/step - loss: 6909862400.0000 - val_loss: 20675829760.0000 Epoch 26/150 8/8 - 0s - 44ms/step - loss: 7292512768.0000 - val_loss: 19528013824.0000 Epoch 27/150 8/8 - 0s - 44ms/step - loss: 6844777984.0000 - val_loss: 16698336256.0000 Epoch 28/150 8/8 - 0s - 44ms/step - loss: 6874917888.0000 - val_loss: 17318799360.0000 Epoch 29/150 8/8 - 0s - 44ms/step - loss: 7401374720.0000 - val_loss: 18347407360.0000 Epoch 30/150 8/8 - 0s - 44ms/step - loss: 6420854784.0000 - val_loss: 18002798592.0000 Epoch 31/150 8/8 - 0s - 44ms/step - loss: 6393824768.0000 - val_loss: 17272932352.0000 Epoch 32/150 8/8 - 0s - 44ms/step - loss: 6444038144.0000 - val_loss: 16520418304.0000 Epoch 33/150 8/8 - 0s - 45ms/step - loss: 6893662720.0000 - val_loss: 18202013696.0000 Epoch 34/150 8/8 - 0s - 47ms/step - loss: 6096175104.0000 - val_loss: 17366958080.0000 Epoch 35/150 8/8 - 0s - 44ms/step - loss: 7121977344.0000 - val_loss: 17174304768.0000 Epoch 36/150 8/8 - 0s - 45ms/step - loss: 5943210496.0000 - val_loss: 17019105280.0000 Epoch 37/150 8/8 - 0s - 44ms/step - loss: 6029882880.0000 - val_loss: 16938865664.0000 Epoch 38/150 8/8 - 0s - 44ms/step - loss: 5968013824.0000 - val_loss: 19029760000.0000 Epoch 39/150 8/8 - 0s - 44ms/step - loss: 8530981888.0000 - val_loss: 21932326912.0000 Epoch 40/150 8/8 - 0s - 45ms/step - loss: 8958773248.0000 - val_loss: 19374319616.0000 Epoch 41/150 8/8 - 0s - 44ms/step - loss: 8776313856.0000 - val_loss: 17294534656.0000 Epoch 42/150 8/8 - 0s - 44ms/step - loss: 7528290816.0000 - val_loss: 18980823040.0000 Epoch 43/150 8/8 - 0s - 44ms/step - loss: 6767010816.0000 - val_loss: 22390620160.0000 Epoch 44/150 8/8 - 0s - 44ms/step - loss: 6701046784.0000 - val_loss: 18354167808.0000 Epoch 45/150 8/8 - 0s - 44ms/step - loss: 7716420608.0000 - val_loss: 17131030528.0000 Epoch 46/150 8/8 - 0s - 44ms/step - loss: 5908751872.0000 - val_loss: 18244859904.0000 Epoch 47/150 8/8 - 0s - 44ms/step - loss: 6926004736.0000 - val_loss: 20134727680.0000 Epoch 48/150 8/8 - 0s - 44ms/step - loss: 6723584512.0000 - val_loss: 19048880128.0000 Epoch 49/150 8/8 - 0s - 44ms/step - loss: 5545666048.0000 - val_loss: 17418766336.0000 Epoch 50/150 8/8 - 0s - 44ms/step - loss: 5137236480.0000 - val_loss: 16449628160.0000 Epoch 51/150 8/8 - 0s - 44ms/step - loss: 5013325312.0000 - val_loss: 23336169472.0000 Epoch 52/150 8/8 - 0s - 44ms/step - loss: 6988819968.0000 - val_loss: 18803173376.0000 Epoch 53/150 8/8 - 0s - 44ms/step - loss: 6073098752.0000 - val_loss: 20673880064.0000 Epoch 54/150 8/8 - 0s - 44ms/step - loss: 6946417152.0000 - val_loss: 19777730560.0000 Epoch 55/150 8/8 - 0s - 44ms/step - loss: 5307472896.0000 - val_loss: 17794631680.0000 Epoch 56/150 8/8 - 0s - 44ms/step - loss: 4963123712.0000 - val_loss: 17786304512.0000 Epoch 57/150 8/8 - 0s - 44ms/step - loss: 4891612672.0000 - val_loss: 17979236352.0000 Epoch 58/150 8/8 - 0s - 44ms/step - loss: 4558477312.0000 - val_loss: 17798459392.0000 Epoch 59/150 8/8 - 0s - 44ms/step - loss: 4801461248.0000 - val_loss: 18605156352.0000 Epoch 60/150 8/8 - 0s - 44ms/step - loss: 4632326144.0000 - val_loss: 18636691456.0000 Epoch 61/150 8/8 - 0s - 44ms/step - loss: 7221147648.0000 - val_loss: 18749089792.0000 Epoch 62/150 8/8 - 0s - 44ms/step - loss: 6871911424.0000 - val_loss: 18182045696.0000 Epoch 63/150 8/8 - 0s - 44ms/step - loss: 5877418496.0000 - val_loss: 18186409984.0000 Epoch 64/150 8/8 - 0s - 44ms/step - loss: 5621193728.0000 - val_loss: 24608532480.0000 Epoch 65/150 8/8 - 0s - 44ms/step - loss: 6892570624.0000 - val_loss: 19988211712.0000 Epoch 66/150 8/8 - 0s - 44ms/step - loss: 5883388416.0000 - val_loss: 19565705216.0000 Epoch 67/150 8/8 - 0s - 44ms/step - loss: 5317914624.0000 - val_loss: 19195856896.0000 Epoch 68/150 8/8 - 0s - 45ms/step - loss: 5492926976.0000 - val_loss: 18788526080.0000 Epoch 69/150 8/8 - 0s - 44ms/step - loss: 5800559104.0000 - val_loss: 18961080320.0000 Epoch 70/150 8/8 - 0s - 44ms/step - loss: 4771607552.0000 - val_loss: 18856216576.0000 Epoch 71/150 8/8 - 0s - 44ms/step - loss: 4740642816.0000 - val_loss: 19100248064.0000 Epoch 72/150 8/8 - 0s - 44ms/step - loss: 4538798592.0000 - val_loss: 18112870400.0000 Epoch 73/150 8/8 - 0s - 44ms/step - loss: 5075757056.0000 - val_loss: 21610674176.0000 Epoch 74/150 8/8 - 0s - 44ms/step - loss: 5952310784.0000 - val_loss: 19308371968.0000 Epoch 75/150 8/8 - 0s - 44ms/step - loss: 5342662656.0000 - val_loss: 18504763392.0000 Epoch 75: early stopping ======================================= 3 Epoch 1/150 8/8 - 21s - 3s/step - loss: 333131808768.0000 - val_loss: 370968002560.0000 Epoch 2/150 8/8 - 0s - 56ms/step - loss: 343679827968.0000 - val_loss: 129755037696.0000 Epoch 3/150 8/8 - 0s - 43ms/step - loss: 206880374784.0000 - val_loss: 239132622848.0000 Epoch 4/150 8/8 - 0s - 43ms/step - loss: 141423919104.0000 - val_loss: 53438222336.0000 Epoch 5/150 8/8 - 0s - 44ms/step - loss: 132658012160.0000 - val_loss: 51341033472.0000 Epoch 6/150 8/8 - 0s - 44ms/step - loss: 65579847680.0000 - val_loss: 74466467840.0000 Epoch 7/150 8/8 - 0s - 44ms/step - loss: 31912544256.0000 - val_loss: 44194017280.0000 Epoch 8/150 8/8 - 0s - 43ms/step - loss: 22672590848.0000 - val_loss: 30234505216.0000 Epoch 9/150 8/8 - 0s - 44ms/step - loss: 16969053184.0000 - val_loss: 25548308480.0000 Epoch 10/150 8/8 - 0s - 43ms/step - loss: 15006684160.0000 - val_loss: 22143371264.0000 Epoch 11/150 8/8 - 0s - 43ms/step - loss: 12730932224.0000 - val_loss: 22571356160.0000 Epoch 12/150 8/8 - 0s - 43ms/step - loss: 12437800960.0000 - val_loss: 19680249856.0000 Epoch 13/150 8/8 - 0s - 43ms/step - loss: 12513068032.0000 - val_loss: 22539323392.0000 Epoch 14/150 8/8 - 0s - 45ms/step - loss: 12232555520.0000 - val_loss: 20198987776.0000 Epoch 15/150 8/8 - 0s - 44ms/step - loss: 10190428160.0000 - val_loss: 20771207168.0000 Epoch 16/150 8/8 - 0s - 43ms/step - loss: 11294713856.0000 - val_loss: 16391320576.0000 Epoch 17/150 8/8 - 0s - 44ms/step - loss: 9611409408.0000 - val_loss: 16413604864.0000 Epoch 18/150 8/8 - 0s - 43ms/step - loss: 10863264768.0000 - val_loss: 20356630528.0000 Epoch 19/150 8/8 - 0s - 44ms/step - loss: 9911248896.0000 - val_loss: 20044722176.0000 Epoch 20/150 8/8 - 0s - 44ms/step - loss: 10130038784.0000 - val_loss: 17788657664.0000 Epoch 21/150 8/8 - 0s - 43ms/step - loss: 10617415680.0000 - val_loss: 17314623488.0000 Epoch 22/150 8/8 - 0s - 44ms/step - loss: 9966184448.0000 - val_loss: 18128560128.0000 Epoch 23/150 8/8 - 0s - 43ms/step - loss: 10636024832.0000 - val_loss: 18197403648.0000 Epoch 24/150 8/8 - 0s - 44ms/step - loss: 9380912128.0000 - val_loss: 15288996864.0000 Epoch 25/150 8/8 - 0s - 44ms/step - loss: 8787020800.0000 - val_loss: 18786781184.0000 Epoch 26/150 8/8 - 0s - 44ms/step - loss: 10612093952.0000 - val_loss: 15099997184.0000 Epoch 27/150 8/8 - 0s - 44ms/step - loss: 12092460032.0000 - val_loss: 20376549376.0000 Epoch 28/150 8/8 - 0s - 44ms/step - loss: 9618940928.0000 - val_loss: 18043412480.0000 Epoch 29/150 8/8 - 0s - 44ms/step - loss: 7692622848.0000 - val_loss: 15368712192.0000 Epoch 30/150 8/8 - 0s - 44ms/step - loss: 7563075072.0000 - val_loss: 15547862016.0000 Epoch 31/150 8/8 - 0s - 44ms/step - loss: 7652174848.0000 - val_loss: 15095064576.0000 Epoch 32/150 8/8 - 0s - 44ms/step - loss: 9178969088.0000 - val_loss: 20978319360.0000 Epoch 33/150 8/8 - 0s - 44ms/step - loss: 8956332032.0000 - val_loss: 25532321792.0000 Epoch 34/150 8/8 - 0s - 44ms/step - loss: 11470585856.0000 - val_loss: 15840988160.0000 Epoch 35/150 8/8 - 0s - 44ms/step - loss: 7925054976.0000 - val_loss: 16660546560.0000 Epoch 36/150 8/8 - 0s - 44ms/step - loss: 8721024000.0000 - val_loss: 15629094912.0000 Epoch 37/150 8/8 - 0s - 43ms/step - loss: 8928829440.0000 - val_loss: 14865820672.0000 Epoch 38/150 8/8 - 0s - 44ms/step - loss: 6986847232.0000 - val_loss: 14541087744.0000 Epoch 39/150 8/8 - 0s - 44ms/step - loss: 7957356544.0000 - val_loss: 15079907328.0000 Epoch 40/150 8/8 - 0s - 44ms/step - loss: 6132814848.0000 - val_loss: 19793524736.0000 Epoch 41/150 8/8 - 0s - 44ms/step - loss: 9682405376.0000 - val_loss: 15511847936.0000 Epoch 42/150 8/8 - 0s - 45ms/step - loss: 7938184704.0000 - val_loss: 15161032704.0000 Epoch 43/150 8/8 - 0s - 44ms/step - loss: 11871593472.0000 - val_loss: 22093836288.0000 Epoch 44/150 8/8 - 0s - 44ms/step - loss: 10286960640.0000 - val_loss: 18485227520.0000 Epoch 45/150 8/8 - 0s - 44ms/step - loss: 14832917504.0000 - val_loss: 29872973824.0000 Epoch 46/150 8/8 - 0s - 44ms/step - loss: 13397815296.0000 - val_loss: 20867938304.0000 Epoch 47/150 8/8 - 0s - 43ms/step - loss: 10589310976.0000 - val_loss: 19198025728.0000 Epoch 48/150 8/8 - 0s - 44ms/step - loss: 9587639296.0000 - val_loss: 17917896704.0000 Epoch 49/150 8/8 - 0s - 44ms/step - loss: 8316842496.0000 - val_loss: 15841428480.0000 Epoch 50/150 8/8 - 0s - 44ms/step - loss: 7685756928.0000 - val_loss: 15254457344.0000 Epoch 51/150 8/8 - 0s - 44ms/step - loss: 7522620416.0000 - val_loss: 18847922176.0000 Epoch 52/150 8/8 - 0s - 44ms/step - loss: 7552425472.0000 - val_loss: 16646581248.0000 Epoch 53/150 8/8 - 0s - 44ms/step - loss: 10177015808.0000 - val_loss: 18371600384.0000 Epoch 54/150 8/8 - 0s - 44ms/step - loss: 8900449280.0000 - val_loss: 15992401920.0000 Epoch 55/150 8/8 - 0s - 44ms/step - loss: 6246259200.0000 - val_loss: 14747586560.0000 Epoch 56/150 8/8 - 0s - 44ms/step - loss: 6015745536.0000 - val_loss: 14402470912.0000 Epoch 57/150 8/8 - 0s - 44ms/step - loss: 6469605376.0000 - val_loss: 17437169664.0000 Epoch 58/150 8/8 - 0s - 44ms/step - loss: 8129166336.0000 - val_loss: 16658107392.0000 Epoch 59/150 8/8 - 0s - 44ms/step - loss: 8069288960.0000 - val_loss: 15207404544.0000 Epoch 60/150 8/8 - 0s - 44ms/step - loss: 9192803328.0000 - val_loss: 24142895104.0000 Epoch 61/150 8/8 - 0s - 44ms/step - loss: 8063428608.0000 - val_loss: 14136548352.0000 Epoch 62/150 8/8 - 0s - 44ms/step - loss: 6224045568.0000 - val_loss: 17265704960.0000 Epoch 63/150 8/8 - 0s - 44ms/step - loss: 6521044992.0000 - val_loss: 13302090752.0000 Epoch 64/150 8/8 - 0s - 44ms/step - loss: 6801262080.0000 - val_loss: 19809384448.0000 Epoch 65/150 8/8 - 0s - 44ms/step - loss: 8537410048.0000 - val_loss: 19630039040.0000 Epoch 66/150 8/8 - 0s - 44ms/step - loss: 6393229824.0000 - val_loss: 16643032064.0000 Epoch 67/150 8/8 - 0s - 44ms/step - loss: 5234432000.0000 - val_loss: 17820147712.0000 Epoch 68/150 8/8 - 0s - 44ms/step - loss: 8140945408.0000 - val_loss: 15458912256.0000 Epoch 69/150 8/8 - 0s - 44ms/step - loss: 8007631872.0000 - val_loss: 15876874240.0000 Epoch 70/150 8/8 - 0s - 45ms/step - loss: 6373708288.0000 - val_loss: 18539823104.0000 Epoch 71/150 8/8 - 0s - 44ms/step - loss: 6840059392.0000 - val_loss: 17326911488.0000 Epoch 72/150 8/8 - 0s - 46ms/step - loss: 10346519552.0000 - val_loss: 18447204352.0000 Epoch 73/150 8/8 - 0s - 47ms/step - loss: 7819506688.0000 - val_loss: 14696387584.0000 Epoch 74/150 8/8 - 0s - 45ms/step - loss: 6052153344.0000 - val_loss: 21267703808.0000 Epoch 75/150 8/8 - 0s - 45ms/step - loss: 7956389888.0000 - val_loss: 16388240384.0000 Epoch 76/150 8/8 - 0s - 45ms/step - loss: 7196965888.0000 - val_loss: 20423415808.0000 Epoch 77/150 8/8 - 0s - 44ms/step - loss: 5733415936.0000 - val_loss: 16421949440.0000 Epoch 78/150 8/8 - 0s - 44ms/step - loss: 7321218048.0000 - val_loss: 18719490048.0000 Epoch 79/150 8/8 - 0s - 44ms/step - loss: 7028423680.0000 - val_loss: 15059024896.0000 Epoch 80/150 8/8 - 0s - 44ms/step - loss: 5440313344.0000 - val_loss: 16120580096.0000 Epoch 81/150 8/8 - 0s - 44ms/step - loss: 4762501120.0000 - val_loss: 16071350272.0000 Epoch 82/150 8/8 - 0s - 44ms/step - loss: 5238790144.0000 - val_loss: 14154473472.0000 Epoch 83/150 8/8 - 0s - 44ms/step - loss: 5607762432.0000 - val_loss: 15561421824.0000 Epoch 84/150 8/8 - 0s - 44ms/step - loss: 4920118272.0000 - val_loss: 15625426944.0000 Epoch 85/150 8/8 - 0s - 44ms/step - loss: 5326107136.0000 - val_loss: 15925782528.0000 Epoch 86/150 8/8 - 0s - 45ms/step - loss: 4760399872.0000 - val_loss: 18455529472.0000 Epoch 87/150 8/8 - 0s - 44ms/step - loss: 5973572096.0000 - val_loss: 14240203776.0000 Epoch 88/150 8/8 - 0s - 44ms/step - loss: 5464611328.0000 - val_loss: 15258218496.0000 Epoch 88: early stopping ======================================= 4 Epoch 1/150 8/8 - 23s - 3s/step - loss: 332965412864.0000 - val_loss: 370958860288.0000 Epoch 2/150 8/8 - 0s - 55ms/step - loss: 311689969664.0000 - val_loss: 843722391552.0000 Epoch 3/150 8/8 - 0s - 44ms/step - loss: 183587209216.0000 - val_loss: 164074782720.0000 Epoch 4/150 8/8 - 0s - 43ms/step - loss: 105109323776.0000 - val_loss: 130946908160.0000 Epoch 5/150 8/8 - 0s - 46ms/step - loss: 70926548992.0000 - val_loss: 57204752384.0000 Epoch 6/150 8/8 - 0s - 44ms/step - loss: 38607491072.0000 - val_loss: 24503250944.0000 Epoch 7/150 8/8 - 0s - 44ms/step - loss: 34014586880.0000 - val_loss: 25708242944.0000 Epoch 8/150 8/8 - 0s - 44ms/step - loss: 24708632576.0000 - val_loss: 21385943040.0000 Epoch 9/150 8/8 - 0s - 45ms/step - loss: 21538289664.0000 - val_loss: 21273319424.0000 Epoch 10/150 8/8 - 0s - 44ms/step - loss: 17793763328.0000 - val_loss: 20731068416.0000 Epoch 11/150 8/8 - 0s - 44ms/step - loss: 14506426368.0000 - val_loss: 19487899648.0000 Epoch 12/150 8/8 - 0s - 44ms/step - loss: 13694493696.0000 - val_loss: 19452102656.0000 Epoch 13/150 8/8 - 0s - 44ms/step - loss: 12544518144.0000 - val_loss: 24227569664.0000 Epoch 14/150 8/8 - 0s - 44ms/step - loss: 11369886720.0000 - val_loss: 21263362048.0000 Epoch 15/150 8/8 - 0s - 44ms/step - loss: 8978388992.0000 - val_loss: 19227746304.0000 Epoch 16/150 8/8 - 1s - 77ms/step - loss: 9172440064.0000 - val_loss: 18490136576.0000 Epoch 17/150 8/8 - 0s - 44ms/step - loss: 7925522432.0000 - val_loss: 16481592320.0000 Epoch 18/150 8/8 - 0s - 44ms/step - loss: 8106087936.0000 - val_loss: 16990749696.0000 Epoch 19/150 8/8 - 0s - 44ms/step - loss: 10041021440.0000 - val_loss: 25744300032.0000 Epoch 20/150 8/8 - 0s - 44ms/step - loss: 9144858624.0000 - val_loss: 14402652160.0000 Epoch 21/150 8/8 - 0s - 44ms/step - loss: 9329552384.0000 - val_loss: 15419848704.0000 Epoch 22/150 8/8 - 0s - 43ms/step - loss: 7650539520.0000 - val_loss: 15872695296.0000 Epoch 23/150 8/8 - 0s - 44ms/step - loss: 7171934208.0000 - val_loss: 14374939648.0000 Epoch 24/150 8/8 - 0s - 44ms/step - loss: 7069203456.0000 - val_loss: 15751160832.0000 Epoch 25/150 8/8 - 0s - 46ms/step - loss: 7716983296.0000 - val_loss: 16544926720.0000 Epoch 26/150 8/8 - 0s - 44ms/step - loss: 8064707584.0000 - val_loss: 14972206080.0000 Epoch 27/150 8/8 - 0s - 43ms/step - loss: 8233238528.0000 - val_loss: 17242052608.0000 Epoch 28/150 8/8 - 0s - 44ms/step - loss: 7033746944.0000 - val_loss: 14383555584.0000 Epoch 29/150 8/8 - 0s - 44ms/step - loss: 7389596160.0000 - val_loss: 16510401536.0000 Epoch 30/150 8/8 - 0s - 44ms/step - loss: 8516790272.0000 - val_loss: 19883059200.0000 Epoch 31/150 8/8 - 0s - 43ms/step - loss: 10686635008.0000 - val_loss: 15531506688.0000 Epoch 32/150 8/8 - 0s - 44ms/step - loss: 7377322496.0000 - val_loss: 16119406592.0000 Epoch 33/150 8/8 - 0s - 44ms/step - loss: 9107068928.0000 - val_loss: 15687796736.0000 Epoch 34/150 8/8 - 0s - 44ms/step - loss: 7016641024.0000 - val_loss: 15215364096.0000 Epoch 35/150 8/8 - 0s - 44ms/step - loss: 6406585856.0000 - val_loss: 17459034112.0000 Epoch 36/150 8/8 - 0s - 44ms/step - loss: 7242393088.0000 - val_loss: 15672602624.0000 Epoch 37/150 8/8 - 0s - 44ms/step - loss: 7794547712.0000 - val_loss: 14953553920.0000 Epoch 38/150 8/8 - 0s - 44ms/step - loss: 7043517440.0000 - val_loss: 16215073792.0000 Epoch 39/150 8/8 - 0s - 44ms/step - loss: 6555794944.0000 - val_loss: 13372609536.0000 Epoch 40/150 8/8 - 0s - 44ms/step - loss: 6273844736.0000 - val_loss: 19161835520.0000 Epoch 41/150 8/8 - 0s - 44ms/step - loss: 8016130048.0000 - val_loss: 15376745472.0000 Epoch 42/150 8/8 - 0s - 45ms/step - loss: 6483491328.0000 - val_loss: 12808914944.0000 Epoch 43/150 8/8 - 0s - 44ms/step - loss: 6627310592.0000 - val_loss: 16744270848.0000 Epoch 44/150 8/8 - 0s - 44ms/step - loss: 5979522048.0000 - val_loss: 16069594112.0000 Epoch 45/150 8/8 - 0s - 45ms/step - loss: 7897920000.0000 - val_loss: 21201719296.0000 Epoch 46/150 8/8 - 0s - 44ms/step - loss: 8547323392.0000 - val_loss: 18554476544.0000 Epoch 47/150 8/8 - 0s - 44ms/step - loss: 8024240128.0000 - val_loss: 21111246848.0000 Epoch 48/150 8/8 - 0s - 44ms/step - loss: 9059536896.0000 - val_loss: 14481491968.0000 Epoch 49/150 8/8 - 0s - 44ms/step - loss: 7290400768.0000 - val_loss: 16314216448.0000 Epoch 50/150 8/8 - 0s - 44ms/step - loss: 7340284416.0000 - val_loss: 16863049728.0000 Epoch 51/150 8/8 - 0s - 44ms/step - loss: 6321382400.0000 - val_loss: 13021291520.0000 Epoch 52/150 8/8 - 0s - 44ms/step - loss: 6172838400.0000 - val_loss: 14301253632.0000 Epoch 53/150 8/8 - 0s - 46ms/step - loss: 6840600064.0000 - val_loss: 18244388864.0000 Epoch 54/150 8/8 - 0s - 45ms/step - loss: 8305925632.0000 - val_loss: 14192798720.0000 Epoch 55/150 8/8 - 0s - 44ms/step - loss: 8396320768.0000 - val_loss: 15081013248.0000 Epoch 56/150 8/8 - 0s - 44ms/step - loss: 6818115584.0000 - val_loss: 18243239936.0000 Epoch 57/150 8/8 - 0s - 44ms/step - loss: 6558855168.0000 - val_loss: 18747217920.0000 Epoch 58/150 8/8 - 0s - 44ms/step - loss: 6120394240.0000 - val_loss: 19211184128.0000 Epoch 59/150 8/8 - 0s - 44ms/step - loss: 10241474560.0000 - val_loss: 16295349248.0000 Epoch 60/150 8/8 - 0s - 44ms/step - loss: 6168457728.0000 - val_loss: 16996061184.0000 Epoch 61/150 8/8 - 0s - 44ms/step - loss: 5678909440.0000 - val_loss: 14814504960.0000 Epoch 62/150 8/8 - 0s - 44ms/step - loss: 5442418688.0000 - val_loss: 27306475520.0000 Epoch 63/150 8/8 - 0s - 44ms/step - loss: 10215317504.0000 - val_loss: 20827006976.0000 Epoch 64/150 8/8 - 0s - 44ms/step - loss: 10403002368.0000 - val_loss: 15504002048.0000 Epoch 65/150 8/8 - 0s - 44ms/step - loss: 5530212352.0000 - val_loss: 17414854656.0000 Epoch 66/150 8/8 - 0s - 44ms/step - loss: 5985874944.0000 - val_loss: 20766240768.0000 Epoch 67/150 8/8 - 0s - 45ms/step - loss: 8893236224.0000 - val_loss: 29824243712.0000 Epoch 67: early stopping Training finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 7s 3s/step Score (RMSE): 81763.8046875
In [84]:
chart_regression(pred_resNet2.flatten(),y_test)
In [85]:
print(model.summary())
plot_model(model, show_shapes=True)
Model: "functional_66"
┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ Connected to ┃ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩ │ input_layer_67 │ (None, 128, 128, │ 0 │ - │ │ (InputLayer) │ 3) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv1_pad │ (None, 134, 134, │ 0 │ input_layer_67[0… │ │ (ZeroPadding2D) │ 3) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv1_conv (Conv2D) │ (None, 64, 64, │ 9,472 │ conv1_pad[0][0] │ │ │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv1_bn │ (None, 64, 64, │ 256 │ conv1_conv[0][0] │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv1_relu │ (None, 64, 64, │ 0 │ conv1_bn[0][0] │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ pool1_pad │ (None, 66, 66, │ 0 │ conv1_relu[0][0] │ │ (ZeroPadding2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ pool1_pool │ (None, 32, 32, │ 0 │ pool1_pad[0][0] │ │ (MaxPooling2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_1_conv │ (None, 32, 32, │ 4,160 │ pool1_pool[0][0] │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_1_bn │ (None, 32, 32, │ 256 │ conv2_block1_1_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_1_relu │ (None, 32, 32, │ 0 │ conv2_block1_1_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_2_conv │ (None, 32, 32, │ 36,928 │ conv2_block1_1_r… │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_2_bn │ (None, 32, 32, │ 256 │ conv2_block1_2_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_2_relu │ (None, 32, 32, │ 0 │ conv2_block1_2_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_0_conv │ (None, 32, 32, │ 16,640 │ pool1_pool[0][0] │ │ (Conv2D) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_3_conv │ (None, 32, 32, │ 16,640 │ conv2_block1_2_r… │ │ (Conv2D) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_0_bn │ (None, 32, 32, │ 1,024 │ conv2_block1_0_c… │ │ (BatchNormalizatio… │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_3_bn │ (None, 32, 32, │ 1,024 │ conv2_block1_3_c… │ │ (BatchNormalizatio… │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_add │ (None, 32, 32, │ 0 │ conv2_block1_0_b… │ │ (Add) │ 256) │ │ conv2_block1_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block1_out │ (None, 32, 32, │ 0 │ conv2_block1_add… │ │ (Activation) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_1_conv │ (None, 32, 32, │ 16,448 │ conv2_block1_out… │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_1_bn │ (None, 32, 32, │ 256 │ conv2_block2_1_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_1_relu │ (None, 32, 32, │ 0 │ conv2_block2_1_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_2_conv │ (None, 32, 32, │ 36,928 │ conv2_block2_1_r… │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_2_bn │ (None, 32, 32, │ 256 │ conv2_block2_2_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_2_relu │ (None, 32, 32, │ 0 │ conv2_block2_2_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_3_conv │ (None, 32, 32, │ 16,640 │ conv2_block2_2_r… │ │ (Conv2D) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_3_bn │ (None, 32, 32, │ 1,024 │ conv2_block2_3_c… │ │ (BatchNormalizatio… │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_add │ (None, 32, 32, │ 0 │ conv2_block1_out… │ │ (Add) │ 256) │ │ conv2_block2_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block2_out │ (None, 32, 32, │ 0 │ conv2_block2_add… │ │ (Activation) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_1_conv │ (None, 32, 32, │ 16,448 │ conv2_block2_out… │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_1_bn │ (None, 32, 32, │ 256 │ conv2_block3_1_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_1_relu │ (None, 32, 32, │ 0 │ conv2_block3_1_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_2_conv │ (None, 32, 32, │ 36,928 │ conv2_block3_1_r… │ │ (Conv2D) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_2_bn │ (None, 32, 32, │ 256 │ conv2_block3_2_c… │ │ (BatchNormalizatio… │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_2_relu │ (None, 32, 32, │ 0 │ conv2_block3_2_b… │ │ (Activation) │ 64) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_3_conv │ (None, 32, 32, │ 16,640 │ conv2_block3_2_r… │ │ (Conv2D) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_3_bn │ (None, 32, 32, │ 1,024 │ conv2_block3_3_c… │ │ (BatchNormalizatio… │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_add │ (None, 32, 32, │ 0 │ conv2_block2_out… │ │ (Add) │ 256) │ │ conv2_block3_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2_block3_out │ (None, 32, 32, │ 0 │ conv2_block3_add… │ │ (Activation) │ 256) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_1_conv │ (None, 16, 16, │ 32,896 │ conv2_block3_out… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_1_bn │ (None, 16, 16, │ 512 │ conv3_block1_1_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_1_relu │ (None, 16, 16, │ 0 │ conv3_block1_1_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_2_conv │ (None, 16, 16, │ 147,584 │ conv3_block1_1_r… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_2_bn │ (None, 16, 16, │ 512 │ conv3_block1_2_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_2_relu │ (None, 16, 16, │ 0 │ conv3_block1_2_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_0_conv │ (None, 16, 16, │ 131,584 │ conv2_block3_out… │ │ (Conv2D) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_3_conv │ (None, 16, 16, │ 66,048 │ conv3_block1_2_r… │ │ (Conv2D) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_0_bn │ (None, 16, 16, │ 2,048 │ conv3_block1_0_c… │ │ (BatchNormalizatio… │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_3_bn │ (None, 16, 16, │ 2,048 │ conv3_block1_3_c… │ │ (BatchNormalizatio… │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_add │ (None, 16, 16, │ 0 │ conv3_block1_0_b… │ │ (Add) │ 512) │ │ conv3_block1_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block1_out │ (None, 16, 16, │ 0 │ conv3_block1_add… │ │ (Activation) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_1_conv │ (None, 16, 16, │ 65,664 │ conv3_block1_out… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_1_bn │ (None, 16, 16, │ 512 │ conv3_block2_1_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_1_relu │ (None, 16, 16, │ 0 │ conv3_block2_1_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_2_conv │ (None, 16, 16, │ 147,584 │ conv3_block2_1_r… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_2_bn │ (None, 16, 16, │ 512 │ conv3_block2_2_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_2_relu │ (None, 16, 16, │ 0 │ conv3_block2_2_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_3_conv │ (None, 16, 16, │ 66,048 │ conv3_block2_2_r… │ │ (Conv2D) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_3_bn │ (None, 16, 16, │ 2,048 │ conv3_block2_3_c… │ │ (BatchNormalizatio… │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_add │ (None, 16, 16, │ 0 │ conv3_block1_out… │ │ (Add) │ 512) │ │ conv3_block2_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block2_out │ (None, 16, 16, │ 0 │ conv3_block2_add… │ │ (Activation) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_1_conv │ (None, 16, 16, │ 65,664 │ conv3_block2_out… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_1_bn │ (None, 16, 16, │ 512 │ conv3_block3_1_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_1_relu │ (None, 16, 16, │ 0 │ conv3_block3_1_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_2_conv │ (None, 16, 16, │ 147,584 │ conv3_block3_1_r… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_2_bn │ (None, 16, 16, │ 512 │ conv3_block3_2_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_2_relu │ (None, 16, 16, │ 0 │ conv3_block3_2_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_3_conv │ (None, 16, 16, │ 66,048 │ conv3_block3_2_r… │ │ (Conv2D) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_3_bn │ (None, 16, 16, │ 2,048 │ conv3_block3_3_c… │ │ (BatchNormalizatio… │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_add │ (None, 16, 16, │ 0 │ conv3_block2_out… │ │ (Add) │ 512) │ │ conv3_block3_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block3_out │ (None, 16, 16, │ 0 │ conv3_block3_add… │ │ (Activation) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_1_conv │ (None, 16, 16, │ 65,664 │ conv3_block3_out… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_1_bn │ (None, 16, 16, │ 512 │ conv3_block4_1_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_1_relu │ (None, 16, 16, │ 0 │ conv3_block4_1_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_2_conv │ (None, 16, 16, │ 147,584 │ conv3_block4_1_r… │ │ (Conv2D) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_2_bn │ (None, 16, 16, │ 512 │ conv3_block4_2_c… │ │ (BatchNormalizatio… │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_2_relu │ (None, 16, 16, │ 0 │ conv3_block4_2_b… │ │ (Activation) │ 128) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_3_conv │ (None, 16, 16, │ 66,048 │ conv3_block4_2_r… │ │ (Conv2D) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_3_bn │ (None, 16, 16, │ 2,048 │ conv3_block4_3_c… │ │ (BatchNormalizatio… │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_add │ (None, 16, 16, │ 0 │ conv3_block3_out… │ │ (Add) │ 512) │ │ conv3_block4_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv3_block4_out │ (None, 16, 16, │ 0 │ conv3_block4_add… │ │ (Activation) │ 512) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_1_conv │ (None, 8, 8, 256) │ 131,328 │ conv3_block4_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block1_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block1_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block1_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block1_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block1_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_0_conv │ (None, 8, 8, │ 525,312 │ conv3_block4_out… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block1_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_0_bn │ (None, 8, 8, │ 4,096 │ conv4_block1_0_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block1_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_add │ (None, 8, 8, │ 0 │ conv4_block1_0_b… │ │ (Add) │ 1024) │ │ conv4_block1_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block1_out │ (None, 8, 8, │ 0 │ conv4_block1_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_1_conv │ (None, 8, 8, 256) │ 262,400 │ conv4_block1_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block2_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block2_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block2_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block2_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block2_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block2_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block2_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_add │ (None, 8, 8, │ 0 │ conv4_block1_out… │ │ (Add) │ 1024) │ │ conv4_block2_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block2_out │ (None, 8, 8, │ 0 │ conv4_block2_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_1_conv │ (None, 8, 8, 256) │ 262,400 │ conv4_block2_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block3_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block3_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block3_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block3_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block3_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block3_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block3_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_add │ (None, 8, 8, │ 0 │ conv4_block2_out… │ │ (Add) │ 1024) │ │ conv4_block3_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block3_out │ (None, 8, 8, │ 0 │ conv4_block3_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_1_conv │ (None, 8, 8, 256) │ 262,400 │ conv4_block3_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block4_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block4_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block4_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block4_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block4_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block4_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block4_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_add │ (None, 8, 8, │ 0 │ conv4_block3_out… │ │ (Add) │ 1024) │ │ conv4_block4_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block4_out │ (None, 8, 8, │ 0 │ conv4_block4_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_1_conv │ (None, 8, 8, 256) │ 262,400 │ conv4_block4_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block5_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block5_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block5_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block5_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block5_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block5_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block5_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_add │ (None, 8, 8, │ 0 │ conv4_block4_out… │ │ (Add) │ 1024) │ │ conv4_block5_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block5_out │ (None, 8, 8, │ 0 │ conv4_block5_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_1_conv │ (None, 8, 8, 256) │ 262,400 │ conv4_block5_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_1_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block6_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_1_relu │ (None, 8, 8, 256) │ 0 │ conv4_block6_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_2_conv │ (None, 8, 8, 256) │ 590,080 │ conv4_block6_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_2_bn │ (None, 8, 8, 256) │ 1,024 │ conv4_block6_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_2_relu │ (None, 8, 8, 256) │ 0 │ conv4_block6_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_3_conv │ (None, 8, 8, │ 263,168 │ conv4_block6_2_r… │ │ (Conv2D) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_3_bn │ (None, 8, 8, │ 4,096 │ conv4_block6_3_c… │ │ (BatchNormalizatio… │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_add │ (None, 8, 8, │ 0 │ conv4_block5_out… │ │ (Add) │ 1024) │ │ conv4_block6_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv4_block6_out │ (None, 8, 8, │ 0 │ conv4_block6_add… │ │ (Activation) │ 1024) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_1_conv │ (None, 4, 4, 512) │ 524,800 │ conv4_block6_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_1_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block1_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_1_relu │ (None, 4, 4, 512) │ 0 │ conv5_block1_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_2_conv │ (None, 4, 4, 512) │ 2,359,808 │ conv5_block1_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_2_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block1_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_2_relu │ (None, 4, 4, 512) │ 0 │ conv5_block1_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_0_conv │ (None, 4, 4, │ 2,099,200 │ conv4_block6_out… │ │ (Conv2D) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_3_conv │ (None, 4, 4, │ 1,050,624 │ conv5_block1_2_r… │ │ (Conv2D) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_0_bn │ (None, 4, 4, │ 8,192 │ conv5_block1_0_c… │ │ (BatchNormalizatio… │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_3_bn │ (None, 4, 4, │ 8,192 │ conv5_block1_3_c… │ │ (BatchNormalizatio… │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_add │ (None, 4, 4, │ 0 │ conv5_block1_0_b… │ │ (Add) │ 2048) │ │ conv5_block1_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block1_out │ (None, 4, 4, │ 0 │ conv5_block1_add… │ │ (Activation) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_1_conv │ (None, 4, 4, 512) │ 1,049,088 │ conv5_block1_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_1_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block2_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_1_relu │ (None, 4, 4, 512) │ 0 │ conv5_block2_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_2_conv │ (None, 4, 4, 512) │ 2,359,808 │ conv5_block2_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_2_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block2_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_2_relu │ (None, 4, 4, 512) │ 0 │ conv5_block2_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_3_conv │ (None, 4, 4, │ 1,050,624 │ conv5_block2_2_r… │ │ (Conv2D) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_3_bn │ (None, 4, 4, │ 8,192 │ conv5_block2_3_c… │ │ (BatchNormalizatio… │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_add │ (None, 4, 4, │ 0 │ conv5_block1_out… │ │ (Add) │ 2048) │ │ conv5_block2_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block2_out │ (None, 4, 4, │ 0 │ conv5_block2_add… │ │ (Activation) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_1_conv │ (None, 4, 4, 512) │ 1,049,088 │ conv5_block2_out… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_1_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block3_1_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_1_relu │ (None, 4, 4, 512) │ 0 │ conv5_block3_1_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_2_conv │ (None, 4, 4, 512) │ 2,359,808 │ conv5_block3_1_r… │ │ (Conv2D) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_2_bn │ (None, 4, 4, 512) │ 2,048 │ conv5_block3_2_c… │ │ (BatchNormalizatio… │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_2_relu │ (None, 4, 4, 512) │ 0 │ conv5_block3_2_b… │ │ (Activation) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_3_conv │ (None, 4, 4, │ 1,050,624 │ conv5_block3_2_r… │ │ (Conv2D) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_3_bn │ (None, 4, 4, │ 8,192 │ conv5_block3_3_c… │ │ (BatchNormalizatio… │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ input_layer_68 │ (None, 35) │ 0 │ - │ │ (InputLayer) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_add │ (None, 4, 4, │ 0 │ conv5_block2_out… │ │ (Add) │ 2048) │ │ conv5_block3_3_b… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_220 (Dense) │ (None, 512) │ 18,432 │ input_layer_68[0… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv5_block3_out │ (None, 4, 4, │ 0 │ conv5_block3_add… │ │ (Activation) │ 2048) │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_221 (Dense) │ (None, 1024) │ 525,312 │ dense_220[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ conv2d_49 (Conv2D) │ (None, 1, 1, 128) │ 4,194,432 │ conv5_block3_out… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_222 (Dense) │ (None, 2048) │ 2,099,200 │ dense_221[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ flatten_33 │ (None, 128) │ 0 │ conv2d_49[0][0] │ │ (Flatten) │ │ │ │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_223 (Dense) │ (None, 512) │ 1,049,088 │ dense_222[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ concatenate_33 │ (None, 640) │ 0 │ flatten_33[0][0], │ │ (Concatenate) │ │ │ dense_223[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_224 (Dense) │ (None, 512) │ 328,192 │ concatenate_33[0… │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_225 (Dense) │ (None, 1024) │ 525,312 │ dense_224[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_226 (Dense) │ (None, 2048) │ 2,099,200 │ dense_225[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_227 (Dense) │ (None, 2048) │ 4,196,352 │ dense_226[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_228 (Dense) │ (None, 512) │ 1,049,088 │ dense_227[0][0] │ ├─────────────────────┼───────────────────┼────────────┼───────────────────┤ │ dense_229 (Dense) │ (None, 1) │ 513 │ dense_228[0][0] │ └─────────────────────┴───────────────────┴────────────┴───────────────────┘
Total params: 71,843,077 (274.06 MB)
Trainable params: 16,085,121 (61.36 MB)
Non-trainable params: 23,587,712 (89.98 MB)
Optimizer params: 32,170,244 (122.72 MB)
None
Out[85]:
Transfer Learning : ResNet #3¶
In [100]:
adam_optimizer = Adam(learning_rate=0.0075, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
sgd_optimizer = SGD(learning_rate=1e-4 , momentum=0.9, nesterov=True)
filepath = './dnn/best_weights-resNet3.keras'
checkpointer = ModelCheckpoint(filepath=filepath, verbose=0, save_best_only=True)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=25, verbose=1, mode='auto')
resNet = ResNet50(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
for layer in resNet.layers:
layer.trainable = True
conv_layer = Conv2D(128, kernel_size=4, activation='relu')(resNet.output)
flatten_res = Flatten()(conv_layer)
visible1 = resNet.input
# Tabular data input model
visible2 = Input(shape=(x.shape[1],))
dense1 = Dense(512, activation='relu')(visible2)
dense2 = Dense(1024, activation='relu')(dense1)
dense3 = Dense(2048, activation='relu')(dense2)
# dense3 = Dropout(0.1)(dense3)
dense4 = Dense(512, activation='relu')(dense3)
merge = concatenate([flatten_res, dense4])
hidden1 = Dense(512, activation='relu')(merge)
hidden2 = Dense(1024, activation='relu')(hidden1)
hidden3 = Dense(2048, activation='relu')(hidden2)
# hidden3 = Dropout(0.2)(hidden3)
hidden4 = Dense(2048, activation='relu')(hidden3)
hidden5 = Dense(512, activation='relu')(hidden4)
output = Dense(1)(hidden5)
model = Model(inputs=[visible1, visible2], outputs=output)
# print(model.summary())
model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
model.fit([x_train_img, x_train], y_train,
validation_data=([x_test_img, x_test], y_test),
callbacks=[monitor, checkpointer],verbose=2,epochs=150)
print('Training finished...Loading the best model')
print()
model.load_weights(filepath) # load weights from best model
pred_resNet3 = model.predict([x_test_img, x_test])
score_resNet3 = np.sqrt(metrics.mean_squared_error(pred_resNet3,y_test))
print("Score (RMSE): {}".format(score_resNet3))
Epoch 1/150 8/8 - 81s - 10s/step - loss: 21457895686144.0000 - val_loss: inf Epoch 2/150 8/8 - 1s - 129ms/step - loss: 334170357760.0000 - val_loss: inf Epoch 3/150 8/8 - 1s - 116ms/step - loss: 207081947136.0000 - val_loss: inf Epoch 4/150 8/8 - 3s - 409ms/step - loss: 168297611264.0000 - val_loss: 9808948503951855215782212646469632.0000 Epoch 5/150 8/8 - 5s - 590ms/step - loss: 110809612288.0000 - val_loss: 11863660548948005796769840496640.0000 Epoch 6/150 8/8 - 4s - 542ms/step - loss: 91185160192.0000 - val_loss: 52509409632972446024649932800.0000 Epoch 7/150 8/8 - 3s - 427ms/step - loss: 51178123264.0000 - val_loss: 510849004047171880621178880.0000 Epoch 8/150 8/8 - 3s - 426ms/step - loss: 27910414336.0000 - val_loss: 7019695166771817600778240.0000 Epoch 9/150 8/8 - 4s - 501ms/step - loss: 21446799360.0000 - val_loss: 192468013417525164376064.0000 Epoch 10/150 8/8 - 3s - 419ms/step - loss: 13124233216.0000 - val_loss: 4390464982606298480640.0000 Epoch 11/150 8/8 - 4s - 467ms/step - loss: 9879970816.0000 - val_loss: 52687576856235343872.0000 Epoch 12/150 8/8 - 4s - 548ms/step - loss: 9667404800.0000 - val_loss: 1676710702137475072.0000 Epoch 13/150 8/8 - 3s - 412ms/step - loss: 9983817728.0000 - val_loss: 36128839692189696.0000 Epoch 14/150 8/8 - 4s - 457ms/step - loss: 5374980096.0000 - val_loss: 2002834652397568.0000 Epoch 15/150 8/8 - 4s - 486ms/step - loss: 4259698176.0000 - val_loss: 25525296824320.0000 Epoch 16/150 8/8 - 3s - 414ms/step - loss: 3630961408.0000 - val_loss: 31788214272.0000 Epoch 17/150 8/8 - 1s - 127ms/step - loss: 3633146368.0000 - val_loss: 56455737344.0000 Epoch 18/150 8/8 - 1s - 118ms/step - loss: 5066958336.0000 - val_loss: 83259056128.0000 Epoch 19/150 8/8 - 1s - 118ms/step - loss: 3359559424.0000 - val_loss: 82531827712.0000 Epoch 20/150 8/8 - 1s - 118ms/step - loss: 3628035328.0000 - val_loss: 67713249280.0000 Epoch 21/150 8/8 - 1s - 119ms/step - loss: 2291453440.0000 - val_loss: 78411513856.0000 Epoch 22/150 8/8 - 1s - 118ms/step - loss: 1577149568.0000 - val_loss: 70351028224.0000 Epoch 23/150 8/8 - 1s - 118ms/step - loss: 2162359552.0000 - val_loss: 64758988800.0000 Epoch 24/150 8/8 - 1s - 118ms/step - loss: 2188599552.0000 - val_loss: 67543506944.0000 Epoch 25/150 8/8 - 1s - 118ms/step - loss: 1397987200.0000 - val_loss: 72141430784.0000 Epoch 26/150 8/8 - 1s - 118ms/step - loss: 1897369856.0000 - val_loss: 57386754048.0000 Epoch 27/150 8/8 - 1s - 119ms/step - loss: 1355569024.0000 - val_loss: 43981090816.0000 Epoch 28/150 8/8 - 1s - 119ms/step - loss: 3084919296.0000 - val_loss: 62863466496.0000 Epoch 29/150 8/8 - 1s - 119ms/step - loss: 2202707200.0000 - val_loss: 65443311616.0000 Epoch 30/150 8/8 - 1s - 118ms/step - loss: 1487263616.0000 - val_loss: 52634767360.0000 Epoch 31/150 8/8 - 1s - 118ms/step - loss: 1208833408.0000 - val_loss: 54866755584.0000 Epoch 32/150 8/8 - 1s - 119ms/step - loss: 1100148736.0000 - val_loss: 51140866048.0000 Epoch 33/150 8/8 - 1s - 120ms/step - loss: 1479528192.0000 - val_loss: 57578721280.0000 Epoch 34/150 8/8 - 1s - 119ms/step - loss: 1136723456.0000 - val_loss: 52306075648.0000 Epoch 35/150 8/8 - 1s - 119ms/step - loss: 1494099584.0000 - val_loss: 54254084096.0000 Epoch 36/150 8/8 - 1s - 119ms/step - loss: 738546368.0000 - val_loss: 52105428992.0000 Epoch 37/150 8/8 - 1s - 120ms/step - loss: 1321481728.0000 - val_loss: 54827958272.0000 Epoch 38/150 8/8 - 1s - 119ms/step - loss: 1011561344.0000 - val_loss: 46396653568.0000 Epoch 39/150 8/8 - 1s - 119ms/step - loss: 832796672.0000 - val_loss: 52856401920.0000 Epoch 40/150 8/8 - 1s - 119ms/step - loss: 1141399168.0000 - val_loss: 49522290688.0000 Epoch 41/150 8/8 - 1s - 121ms/step - loss: 1342953472.0000 - val_loss: 60634574848.0000 Epoch 41: early stopping Training finished...Loading the best model 2/2 ━━━━━━━━━━━━━━━━━━━━ 7s 3s/step Score (RMSE): 178292.515625